/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/vmalloc.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long end = addr + size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
	unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*
 * Map a vmalloc()-space virtual address to the physical page.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);
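
/*
 * Usage sketch (hypothetical caller; "buf", "sg" and "nr_pages" are
 * illustrative): walk a vmalloc'ed buffer page by page, e.g. to
 * build a scatterlist.
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		struct page *page = vmalloc_to_page(buf + i * PAGE_SIZE);
 *
 *		sg_set_page(&sg[i], page, PAGE_SIZE, 0);
 *	}
 */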

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

static struct vm_struct *
__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);

	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}
	if ((size + addr) < addr)
		goto out;
	if (addr > end - size)
		goto out;

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search for an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}
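
/*
 * Usage sketch (hypothetical ioremap-style caller): reserve the
 * address space first, then establish the actual mapping into it.
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *	... map the physical range at [area->addr, area->addr + size) ...
 */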

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
						-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(const void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(const void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}
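
/*
 * Usage sketch (hypothetical): tear down an area that was set up by
 * hand with get_vm_area() rather than through vmap()/vmalloc().
 *
 *	struct vm_struct *area = remove_vm_area(vaddr);
 *	if (area)
 *		kfree(area);
 */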

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
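
/*
 * Usage sketch (hypothetical caller, error handling elided): give
 * individually allocated pages one contiguous kernel mapping.  Note
 * that vunmap() only drops the mapping; the pages themselves must
 * still be freed.
 *
 *	struct page *pages[4];
 *	void *virt;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	virt = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
 *	...
 *	vunmap(virt);
 *	for (i = 0; i < 4; i++)
 *		__free_page(pages[i]);
 */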

static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
					__builtin_return_address(0));
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
						int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
						node, gfp_mask, caller);

	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
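
/*
 * Usage sketch (hypothetical caller; "struct foo_table" is
 * illustrative): vmalloc() suits large allocations that need to be
 * virtually but not physically contiguous.
 *
 *	struct foo_table *tbl = vmalloc(nentries * sizeof(*tbl));
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */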

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);
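
/*
 * Usage sketch (hypothetical driver): buffers destined for userspace
 * should come from vmalloc_user(), which zeroes them and sets
 * VM_USERMAP; see the remap_vmalloc_range() example further down.
 *
 *	dev->ring = vmalloc_user(ring_size);
 *	if (!dev->ring)
 *		return -ENOMEM;
 */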

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
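
/*
 * Usage sketch (hypothetical; "struct node_stats" is illustrative):
 * place a per-node table on the node that will touch it most.
 *
 *	for_each_online_node(nid)
 *		stats[nid] = vmalloc_node(sizeof(struct node_stats), nid);
 */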

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
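
/*
 * Usage sketch (hypothetical, module-loader style): code that will be
 * executed must be mapped with an executable protection.
 *
 *	void *text = vmalloc_exec(text_size);
 *	if (!text)
 *		return -ENOMEM;
 *	memcpy(text, image, text_size);
 */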

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
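
/*
 * Usage sketch (hypothetical driver): devices limited to 32bit
 * physical addresses take their buffers from vmalloc_32().
 *
 *	dev->dma_buf = vmalloc_32(buf_size);
 *	if (!dev->dma_buf)
 *		return -ENOMEM;
 */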

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
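
/*
 * Usage sketch (hypothetical, in the style of /dev/kmem): copy a
 * range of vmalloc space into a kernel buffer; gaps between areas
 * read back as zeroes.
 *
 *	char *kbuf = kmalloc(len, GFP_KERNEL);
 *	long copied = vread(kbuf, (char *)vmalloc_addr, len);
 */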

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	those criteria aren't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
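
/*
 * Usage sketch (hypothetical mmap file operation; "struct foo_dev"
 * is illustrative): expose a vmalloc_user() buffer to userspace.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->ring, vma->vm_pgoff);
 *	}
 */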

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
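
/*
 * Usage sketch (hypothetical, Xen-style): reserve address space whose
 * ptes will be populated by a hypervisor call instead of by the
 * mapping helpers in this file.
 *
 *	struct vm_struct *area = alloc_vm_area(PAGE_SIZE);
 *	if (!area)
 *		return -ENOMEM;
 *	... hand area->addr to the hypervisor ...
 *	free_vm_area(area);
 */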

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);


#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[2 * KSYM_NAME_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	seq_putc(m, '\n');
	return 0;
}

const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
#endif