/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node);

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long end = addr + size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range(start, end);
}

static void unmap_vm_area(struct vm_struct *area)
{
	unmap_kernel_range((unsigned long)area->addr, area->size);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
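/*
 * Illustrative sketch, not part of the original file: a typical caller pairs
 * get_vm_area() with map_vm_area(), handing it an array of already allocated
 * pages.  vmap() below does exactly this, so most code should simply use
 * vmap().  Note that map_vm_area() advances the page cursor it is given,
 * which is why a temporary pointer is passed:
 *
 *	struct vm_struct *area = get_vm_area(nr_pages << PAGE_SHIFT, VM_MAP);
 *	struct page **tmp = pages;
 *
 *	if (!area)
 *		return NULL;
 *	if (map_vm_area(area, PAGE_KERNEL, &tmp)) {
 *		vunmap(area->addr);
 *		return NULL;
 *	}
 *	return area->addr;
 */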

static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
					    unsigned long start, unsigned long end,
					    int node, gfp_t gfp_mask)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);

	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
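	/*
	 * First-fit scan of the address-sorted vmlist: areas that start below
	 * the candidate address are skipped (bumping the candidate past any
	 * that reach it), and the search stops at the first hole large
	 * enough for size, which already includes the guard page here.
	 */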
	for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size + 
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
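/*
 * Illustrative sketch, not part of the original file: architecture ioremap()
 * implementations are the classic users of this interface.  They reserve a
 * virtual range first and install the physical mapping themselves, roughly:
 *
 *	area = get_vm_area(size, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *	area->phys_addr = phys_addr;
 *	addr = area->addr;
 *	... arch code maps phys_addr at addr here ...
 *
 * On failure the reservation is undone with remove_vm_area() (or the
 * free_vm_area() helper near the end of this file) rather than vfree().
 */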

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask);
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		 if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
		 if (tmp->addr == addr)
			 goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;
	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

static void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
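/*
 * Illustrative sketch, not part of the original file; names and error
 * handling are simplified.  vmap() only creates the virtual mapping, so the
 * pages stay the caller's responsibility:
 *
 *	struct page *pages[4];
 *	void *addr;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		pages[i] = alloc_page(GFP_KERNEL);
 *	addr = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
 *	...
 *	vunmap(addr);
 *	for (i = 0; i < 4; i++)
 *		__free_page(pages[i]);
 */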

void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
					PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
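/*
 * Illustrative sketch, not part of the original file: vmalloc() suits large
 * buffers that only need to be virtually contiguous (no DMA, no physical
 * contiguity).  The names and size here are hypothetical:
 *
 *	void *buf = vmalloc(nr_entries * entry_size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 *
 * Each allocation costs page table entries plus one guard page of virtual
 * address space (see __get_vm_area_node() above), so prefer kmalloc() for
 * small objects.
 */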

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *	@returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	that criterion isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;

out_einval_locked:
	read_unlock(&vmlist_lock);
	return -EINVAL;
}
EXPORT_SYMBOL(remap_vmalloc_range);
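/*
 * Illustrative sketch, not part of the original file: a driver that shares a
 * vmalloc'ed buffer with userspace allocates it with vmalloc_user() (which
 * zeroes it and sets VM_USERMAP) and calls remap_vmalloc_range() from its
 * mmap handler.  The names below are hypothetical:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
 *	}
 *
 * where foo_buf was obtained earlier from vmalloc_user().
 */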

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void  __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *	@returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area(size, VM_IOREMAP);
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
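/*
 * Illustrative sketch, not part of the original file: alloc_vm_area() is
 * meant for code (paravirtualized guests, for example) that needs a kernel
 * virtual range whose page tables are already populated but whose actual
 * translations will be installed by other means, such as a hypercall:
 *
 *	struct vm_struct *area = alloc_vm_area(nr_frames * PAGE_SIZE);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	... hand area->addr to the hypervisor to populate ...
 *	free_vm_area(area);
 */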

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);