/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>

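/*
 * vfree() may be called from contexts (such as interrupt handlers) where it
 * is not safe to tear the mapping down directly; such requests are queued on
 * a per-CPU lockless list and the actual __vunmap() is performed later from
 * process context by free_work().
 */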
struct vfree_deferred {
	struct llist_head list;
	struct work_struct wq;
};
static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);

static void __vunmap(const void *, int);

static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);
	while (llnode) {
		void *p = llnode;
		llnode = llist_next(llnode);
		__vunmap(p, 1);
	}
}

/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
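
/*
 * Illustrative sketch (hypothetical helper, not used elsewhere in this file):
 * walking a vmalloc()'ed buffer page by page with vmalloc_to_page(), e.g. the
 * way a driver would when building a scatter-gather list.
 */
static void __maybe_unused vmalloc_walk_pages_example(const void *buf,
						      unsigned long size)
{
	unsigned long off;

	for (off = 0; off < size; off += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(buf + off);

		/* page backs the PAGE_SIZE chunk starting at buf + off. */
		(void)page;
	}
}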

/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
static struct rb_root vmap_area_root = RB_ROOT;

/* The vmap cache globals are protected by vmap_area_lock */
static struct rb_node *free_vmap_cache;
static unsigned long cached_hole_size;
static unsigned long cached_vstart;
static unsigned long cached_align;

static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr >= va->va_end)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp_va;

		parent = *p;
		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp_va->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp_va->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;
	struct vmap_area *first;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(!is_power_of_2(align));

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

	/*
	 * Only scan the relevant parts containing pointers to other objects
	 * to avoid false negatives.
	 */
	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
	spin_lock(&vmap_area_lock);
	/*
	 * Invalidate cache if we have more permissive parameters.
	 * cached_hole_size notes the largest hole noticed _below_
	 * the vmap_area cached in free_vmap_cache: if size fits
	 * into that hole, we want to scan from vstart to reuse
	 * the hole instead of allocating above free_vmap_cache.
	 * Note that __free_vmap_area may update free_vmap_cache
	 * without updating cached_hole_size or cached_align.
	 */
	if (!free_vmap_cache ||
			size < cached_hole_size ||
			vstart < cached_vstart ||
			align < cached_align) {
nocache:
		cached_hole_size = 0;
		free_vmap_cache = NULL;
	}
	/* record if we encounter less permissive parameters */
	cached_vstart = vstart;
	cached_align = align;

	/* find starting point for our search */
	if (free_vmap_cache) {
		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
		addr = ALIGN(first->va_end, align);
		if (addr < vstart)
			goto nocache;
		if (addr + size < addr)
			goto overflow;

	} else {
		addr = ALIGN(vstart, align);
		if (addr + size < addr)
			goto overflow;

		n = vmap_area_root.rb_node;
		first = NULL;

		while (n) {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				first = tmp;
				if (tmp->va_start <= addr)
					break;
				n = n->rb_left;
			} else
				n = n->rb_right;
		}

		if (!first)
			goto found;
	}

	/* from the starting point, walk areas until a suitable hole is found */
	while (addr + size > first->va_start && addr + size <= vend) {
		if (addr + cached_hole_size < first->va_start)
			cached_hole_size = first->va_start - addr;
		addr = ALIGN(first->va_end, align);
		if (addr + size < addr)
			goto overflow;

		if (list_is_last(&first->list, &vmap_area_list))
			goto found;

		first = list_entry(first->list.next,
				struct vmap_area, list);
	}

found:
	if (addr + size > vend)
		goto overflow;

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	free_vmap_cache = &va->rb_node;
	spin_unlock(&vmap_area_lock);

	BUG_ON(va->va_start & (align-1));
	BUG_ON(va->va_start < vstart);
	BUG_ON(va->va_end > vend);

	return va;

overflow:
	spin_unlock(&vmap_area_lock);
	if (!purged) {
		purge_vmap_area_lazy();
		purged = 1;
		goto retry;
	}
	if (printk_ratelimit())
		pr_warn("vmap allocation for size %lu failed: "
			"use vmalloc=<size> to increase size.\n", size);
	kfree(va);
	return ERR_PTR(-EBUSY);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));

	if (free_vmap_cache) {
		if (va->va_end < cached_vstart) {
			free_vmap_cache = NULL;
		} else {
			struct vmap_area *cache;
			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
			if (va->va_start <= cache->va_start) {
				free_vmap_cache = rb_prev(&va->rb_node);
				/*
				 * We don't try to update cached_hole_size or
				 * cached_align, but it won't go very wrong.
				 */
			}
		}
	}
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation.  Areas outside of the vmalloc area can be returned
	 * here too; consider only end addresses which fall inside the
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	kfree_rcu(va, rcu_head);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
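
/*
 * Worked example (illustrative): with 4 online CPUs, fls(4) = 3, so
 * lazy_max_pages() returns 3 * (32MB / PAGE_SIZE) -- with 4K pages that is
 * 24576 pages, i.e. roughly 96MB of lazily freed vmap space may accumulate
 * before a purge and its global TLB flush are triggered.
 */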

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);

/*
 * called before a call to iounmap() if the caller wants vm_area_struct's
 * immediately freed.
 */
void set_iounmap_nonlazy(void)
{
	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
}

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	if (sync)
		purge_fragmented_blocks_allcpus();

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr)
		atomic_sub(nr, &vmap_lazy_nr);

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free a vmap area, caller ensuring that the area has been unmapped
 * and flush_cache_vunmap had been called for the correct range
 * previously.
 */
static void free_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	unmap_vmap_area(va);
	free_vmap_area_noflush(va);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		\
		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
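
/*
 * Worked example (illustrative, assuming a 64-bit kernel, 4K pages and
 * NR_CPUS == 4): VMALLOC_SPACE is guessed at 128GB, so VMALLOC_PAGES is 32M.
 * 32M / roundup_pow_of_two(4) / 16 = 512K, which VMAP_MIN()/VMAP_MAX() clamp
 * into the [VMAP_BBMAP_BITS_MIN, VMAP_BBMAP_BITS_MAX] = [128, 1024] range,
 * giving VMAP_BBMAP_BITS = 1024 and VMAP_BLOCK_SIZE = 1024 * 4K = 4MB.
 */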

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	unsigned long free, dirty;
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	struct list_head free_list;
	struct rcu_head rcu_head;
	struct list_head purge;
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(vb);
		return ERR_CAST(va);
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	spin_lock(&vbq->lock);
	list_add_rcu(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_vmap_area_noflush(vb->va);
	kfree_rcu(vb, rcu_head);
}

static void purge_fragmented_blocks(int cpu)
{
	LIST_HEAD(purge);
	struct vmap_block *vb;
	struct vmap_block *n_vb;
	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);

	rcu_read_lock();
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {

		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
			continue;

		spin_lock(&vb->lock);
		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
			vb->free = 0; /* prevent further allocs after releasing lock */
			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
			spin_unlock(&vb->lock);
			list_add_tail(&vb->purge, &purge);
		} else
			spin_unlock(&vb->lock);
	}
	rcu_read_unlock();

	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
		list_del(&vb->purge);
		free_vmap_block(vb);
	}
}

static void purge_fragmented_blocks_allcpus(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		purge_fragmented_blocks(cpu);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	if (WARN_ON(size == 0)) {
		/*
		 * Allocating 0 bytes isn't what caller wants since
		 * get_order(0) returns a funny result. Just warn and terminate
		 * early.
		 */
		return NULL;
	}
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		if (vb->free < 1UL << order)
			goto next;

		i = VMAP_BBMAP_BITS - vb->free;
		addr = vb->va->va_start + (i << PAGE_SHIFT);
		BUG_ON(addr_to_vb_idx(addr) !=
				addr_to_vb_idx(vb->va->va_start));
		vb->free -= 1UL << order;
		if (vb->free == 0) {
			spin_lock(&vbq->lock);
			list_del_rcu(&vb->free_list);
			spin_unlock(&vbq->lock);
		}
		spin_unlock(&vb->lock);
		break;
next:
		spin_unlock(&vb->lock);
	}

	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);

	spin_lock(&vb->lock);
	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free);
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i, j;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			if (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;

				j = find_last_bit(vb->dirty_map,
							VMAP_BBMAP_BITS);
				j = j + 1; /* need exclusive index */

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
 * faster than vmap so it's good.  But if you mix long-life and short-life
 * objects with vm_map_ram(), it could consume lots of address space through
 * fragmentation (especially on a 32bit machine).  You could see failures in
 * the end.  Please use this function for short-lived objects.
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
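
/*
 * Usage sketch (hypothetical caller, not used in this file): mapping a
 * short-lived set of pages through vm_map_ram() and tearing the mapping
 * down again with vm_unmap_ram().
 */
static void __maybe_unused vm_map_ram_example(struct page **pages,
					      unsigned int count)
{
	void *mem;

	mem = vm_map_ram(pages, count, NUMA_NO_NODE, PAGE_KERNEL);
	if (!mem)
		return;

	/* ... use the linearly mapped buffer at mem ... */

	vm_unmap_ram(mem, count);
}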

static struct vm_struct *vmlist __initdata;
/**
 * vm_area_add_early - add vmap area early during boot
 * @vm: vm_struct to add
 *
 * This function is used to add fixed kernel vm area to vmlist before
 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
 * should contain proper values and the other fields should be zero.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_add_early(struct vm_struct *vm)
{
	struct vm_struct *tmp, **p;

	BUG_ON(vmap_initialized);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr) {
			BUG_ON(tmp->addr < vm->addr + vm->size);
			break;
		} else
			BUG_ON(tmp->addr + tmp->size > vm->addr);
	}
	vm->next = *p;
	*p = vm;
}

/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero.  On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm_area_add_early(vm);
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;
		struct vfree_deferred *p;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		p = &per_cpu(vfree_deferred, i);
		init_llist_head(&p->list);
		INIT_WORK(&p->wq, free_work);
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		va->vm = tmp;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing.  The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range);

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + get_vm_area_size(area);
	int err;

	err = vmap_page_range(addr, end, prot, pages);

	return err > 0 ? 0 : err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, const void *caller)
{
	spin_lock(&vmap_area_lock);
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
	va->flags |= VM_VM_AREA;
	spin_unlock(&vmap_area_lock);
}

static void clear_vm_uninitialized_flag(struct vm_struct *vm)
{
	/*
	 * Before removing VM_UNINITIALIZED,
	 * we should make sure that vm has proper values.
	 * Pair with smp_rmb() in show_numa_info().
	 */
	smp_wmb();
	vm->flags &= ~VM_UNINITIALIZED;
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long align, unsigned long flags, unsigned long start,
		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP)
		align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	if (!(flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	setup_vmalloc_vm(area, va, flags, caller);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       const void *caller)
{
	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
				  GFP_KERNEL, caller);
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL,
				  __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				const void *caller)
{
	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
				  NUMA_NO_NODE, GFP_KERNEL, caller);
}
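
/*
 * Illustrative sketch (hypothetical caller, not part of this file): reserving
 * vmalloc address space with get_vm_area(), backing it with caller-provided
 * pages via map_vm_area(), and releasing everything again with free_vm_area().
 */
static int __maybe_unused get_vm_area_example(struct page **pages,
					      unsigned int count)
{
	struct vm_struct *area;

	area = get_vm_area((unsigned long)count << PAGE_SHIFT, VM_MAP);
	if (!area)
		return -ENOMEM;

	if (map_vm_area(area, PAGE_KERNEL, pages)) {
		free_vm_area(area);
		return -ENOMEM;
	}

	/* ... area->addr now maps the first "count" pages linearly ... */

	free_vm_area(area);
	return 0;
}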

/**
 *	find_vm_area  -  find a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and return it.
 *	It is up to the caller to do all required locking to keep the returned
 *	pointer valid.
 */
struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->vm;

	return NULL;
}

/**
 *	remove_vm_area  -  find and remove a continuous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->vm;

		spin_lock(&vmap_area_lock);
		va->vm = NULL;
		va->flags &= ~VM_VM_AREA;
		spin_unlock(&vmap_area_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)