/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>


/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range(unsigned long addr, unsigned long end,
				pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap(addr, end);

	if (unlikely(err))
		return err;
	return nr;
}

static inline int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
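
/*
 * Illustrative sketch (not part of the original file): a hypothetical helper
 * showing how a caller can recover the backing struct page and pfn of a
 * vmalloc'ed address, e.g. before handing the page to code that works on
 * struct page rather than on kernel virtual addresses.
 */
static inline void example_show_vmalloc_backing(const void *vmalloc_addr)
{
	struct page *page = vmalloc_to_page(vmalloc_addr);
	unsigned long pfn = vmalloc_to_pfn(vmalloc_addr);

	/* Both views agree: the pfn is simply page_to_pfn() of the page. */
	WARN_ON(page && pfn != page_to_pfn(page));
}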


/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(size & ~PAGE_MASK);

	addr = ALIGN(vstart, align);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	spin_lock(&vmap_area_lock);
	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size >= first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING "vmap allocation failed: "
				 "use vmalloc=<size> to increase size.\n");
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
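
/*
 * Worked example (illustrative): with 4KB pages, each 32MB step above is
 * 8192 pages, so a machine with 4 online CPUs (fls(4) == 3) can accumulate
 * up to 3 * 8192 = 24576 lazily freed pages (96MB of vmap space) before a
 * purge and global TLB flush is forced.
 */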

static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr) {
		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
		atomic_sub(nr, &vmap_lazy_nr);
	}

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry(va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		purge_vmap_area_lazy();
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,		\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
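
/*
 * Worked example (illustrative): on a 32-bit machine with 4KB pages the
 * guessed VMALLOC_SPACE of 128MB gives VMALLOC_PAGES = 32768; with
 * NR_CPUS = 4 the raw estimate 32768 / 4 / 16 = 512 bits falls between the
 * clamps above, so each vmap block covers 512 pages and VMAP_BLOCK_SIZE
 * is 2MB.
 */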

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
	struct list_head dirty;
	unsigned int nr_dirty;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct {
			struct list_head free_list;
			struct list_head dirty_list;
		};
		struct rcu_head rcu_head;
	};
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}

static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);
	INIT_LIST_HEAD(&vb->dirty_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);

	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	spin_lock(&vb->vbq->lock);
	if (!list_empty(&vb->free_list))
		list_del(&vb->free_list);
	if (!list_empty(&vb->dirty_list))
		list_del(&vb->dirty_list);
	spin_unlock(&vb->vbq->lock);

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i >= 0) {
			addr = vb->va->va_start + (i << PAGE_SHIFT);
			BUG_ON(addr_to_vb_idx(addr) !=
					addr_to_vb_idx(vb->va->va_start));
			vb->free -= 1UL << order;
			if (vb->free == 0) {
				spin_lock(&vbq->lock);
				list_del_init(&vb->free_list);
				spin_unlock(&vbq->lock);
			}
			spin_unlock(&vb->lock);
			break;
		}
		spin_unlock(&vb->lock);
	}
	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
	if (!vb->dirty) {
		spin_lock(&vb->vbq->lock);
		list_add(&vb->dirty_list, &vb->vbq->dirty);
		spin_unlock(&vb->vbq->lock);
	}
	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free || !list_empty(&vb->free_list));
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
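
/*
 * Illustrative sketch (not part of the original file): a hypothetical caller
 * that is about to hand pages it controls to a device or another domain and
 * therefore wants no stale lazy vmap aliases or TLB entries left for them.
 */
static inline void example_flush_lazy_vmap_aliases(void)
{
	vm_unmap_aliases();
}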

/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 * @returns: a pointer to the address that has been mapped, or NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
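
/*
 * Illustrative sketch (not part of the original file): a hypothetical round
 * trip through vm_map_ram()/vm_unmap_ram(). For small counts this exercises
 * the per-cpu vmap block fast path; larger counts fall back to the global
 * kva allocator.
 */
static inline int example_vm_map_ram_roundtrip(struct page **pages,
					       unsigned int count)
{
	void *mem;

	mem = vm_map_ram(pages, count, -1 /* any node */, PAGE_KERNEL);
	if (!mem)
		return -ENOMEM;

	/* ... access the pages linearly through 'mem' ... */

	vm_unmap_ram(mem, count);	/* count must match the map call */
	return 0;
}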

void __init vmalloc_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		INIT_LIST_HEAD(&vbq->dirty);
		vbq->nr_dirty = 0;
	}
}

void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long flags, unsigned long start, unsigned long end,
		int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	struct vm_struct *tmp, **p;
	unsigned long align = 1;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	area->flags = flags;
	area->addr = (void *)va->va_start;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	area->caller = caller;
	va->private = area;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= area->addr)
			break;
	}
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);

	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}

struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
						-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}

static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->private;

	return NULL;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->private;
		struct vm_struct *tmp, **p;
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		write_lock(&vmlist_lock);
		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
			;
		*p = tmp->next;
		write_unlock(&vmlist_lock);

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 *	NULL, no operation is performed.
 *
 *	Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
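
/*
 * Illustrative sketch (not part of the original file): mapping an array of
 * already-allocated pages into one virtually contiguous region with vmap().
 * The mapping (but not the pages themselves) is released with vunmap().
 */
static inline void *example_vmap_pages(struct page **pages, unsigned int count)
{
	/* VM_MAP tags the area as a plain vmap() mapping in /proc/vmallocinfo */
	void *addr = vmap(pages, count, VM_MAP, PAGE_KERNEL);

	/* ... use addr[0 .. count * PAGE_SIZE - 1], later call vunmap(addr) ... */
	return addr;
}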

static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1,
					__builtin_return_address(0));
}

/**
 *	__vmalloc_node  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *	@node:		node to use for allocation or -1
 *	@caller:	caller's return address
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
						int node, void *caller)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
						node, gfp_mask, caller);

	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
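
/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * vmalloc() and vfree() for a buffer that may be too large for kmalloc().
 */
static inline void *example_vmalloc_buffer(unsigned long nbytes)
{
	void *buf = vmalloc(nbytes);

	/*
	 * buf is virtually contiguous but may be physically scattered, and
	 * the pages are not zeroed. Release it later with vfree(buf).
	 */
	return buf;
}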

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 *	vmalloc_node  -  allocate memory on a specific node
 *	@size:		allocation size
 *	@node:		numa node
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_VMALLOC32, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 *	remap_vmalloc_range  -  map vmalloc pages to userspace
 *	@vma:		vma to cover (map full range of vma)
 *	@addr:		vmalloc memory
 *	@pgoff:		number of pages into addr before first page to map
 *
 *	Returns:	0 for success, -Exxx on failure
 *
 *	This function checks that addr is a valid vmalloc'ed area, and
 *	that it is big enough to cover the vma. Will return failure if
 *	that criteria isn't met.
 *
 *	Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
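
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * character-device ->mmap handler exposing a driver buffer to userspace.
 * 'example_buf' is assumed to have been allocated with vmalloc_user(),
 * which sets VM_USERMAP and zeroes the pages.
 */
static void *example_buf;	/* assumed vmalloc_user() allocation */

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}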

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void  __attribute__((weak)) vmalloc_sync_all(void)
{
}


static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 *	alloc_vm_area - allocate a range of kernel address space
 *	@size:		size of the area
 *
 *	Returns:	NULL on failure, vm_struct on success
 *
 *	This function reserves a range of kernel address space, and
 *	allocates pagetables to map that range.  No actual mappings
 *	are created.  If the kernel address space is not shared
 *	between processes, it syncs the pagetable across all
 *	processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				__builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
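
/*
 * Illustrative sketch (not part of the original file): reserving a chunk of
 * kernel address space with alloc_vm_area() so that someone else (e.g. a
 * hypervisor) can install the actual mappings later, then tearing the
 * reservation down again with free_vm_area().
 */
static inline struct vm_struct *example_reserve_kva(size_t size)
{
	struct vm_struct *area = alloc_vm_area(size);

	/*
	 * On success, area->addr .. area->addr + size is backed by page
	 * tables but contains no mappings yet; undo with free_vm_area(area).
	 */
	return area;
}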


#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;

}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller) {
		char buff[2 * KSYM_NAME_LEN];

		seq_putc(m, ' ');
		sprint_symbol(buff, (unsigned long)v->caller);
		seq_puts(m, buff);
	}

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%lx", v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	if (NUMA_BUILD)
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);
#endif