/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 * 		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include <linux/swapops.h>
#include <linux/elf.h>

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;
unsigned long vmalloc_earlyreserve;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);
EXPORT_SYMBOL(vmalloc_earlyreserve);

int randomize_va_space __read_mostly = 1;

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);


/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = pmd_page(*pmd);
	pmd_clear(pmd);
	pte_lock_deinit(page);
	pte_free_tlb(tlb, page);
	dec_zone_page_state(page, NR_PAGETABLE);
	tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather **tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
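	 *
	 * One concrete instance of the "0 means top" convention: if ceiling
	 * is 0, the "if (ceiling)" block below is skipped and the test
	 * "end - 1 > ceiling - 1" compares against ~0UL, so end is never
	 * pulled down - exactly as if the ceiling sat at the very top of
	 * the address space.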
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset((*tlb)->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);

	if (!(*tlb)->fullmm)
		flush_tlb_pgtables((*tlb)->mm, start, end);
}

void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and vmtruncate before freeing pgtables
		 */
		anon_vma_unlink(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				anon_vma_unlink(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}

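/*
 * Allocate a fresh pte page for @pmd and hook it up.  mm->page_table_lock
 * closes the race with another thread populating the same pmd: if that
 * happened meanwhile, the freshly allocated page is simply freed again.
 */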
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	struct page *new = pte_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	pte_lock_init(new);
	spin_lock(&mm->page_table_lock);
	if (pmd_present(*pmd)) {	/* Another has populated it */
		pte_lock_deinit(new);
		pte_free(new);
	} else {
		mm->nr_ptes++;
		inc_zone_page_state(new, NR_PAGETABLE);
		pmd_populate(mm, pmd, new);
	}
	spin_unlock(&mm->page_table_lock);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	spin_lock(&init_mm.page_table_lock);
	if (pmd_present(*pmd))		/* Another has populated it */
		pte_free_kernel(new);
	else
		pmd_populate_kernel(&init_mm, pmd, new);
	spin_unlock(&init_mm.page_table_lock);
	return 0;
}

static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
{
	if (file_rss)
		add_mm_counter(mm, file_rss, file_rss);
	if (anon_rss)
		add_mm_counter(mm, anon_rss, anon_rss);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
{
	printk(KERN_ERR "Bad pte = %08llx, process = %s, "
			"vm_flags = %lx, vaddr = %lx\n",
		(long long)pte_val(pte),
		(vma->vm_mm == current->mm ? current->comm : "???"),
		vma->vm_flags, vaddr);
	dump_stack();
}

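/*
 * A mapping is subject to copy-on-write when it is private (VM_SHARED
 * clear) yet could be made writable (VM_MAYWRITE set).
 */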
static inline int is_cow_mapping(unsigned int flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

/*
 * This function gets the "struct page" associated with a pte.
 *
 * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
 * will have each page table entry just pointing to a raw page frame
 * number, and as far as the VM layer is concerned, those do not have
 * pages associated with them - even if the PFN might point to memory
 * that otherwise is perfectly fine and has a "struct page".
 *
 * The way we recognize those mappings is through the rules set up
 * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
 * and the vm_pgoff will point to the first PFN mapped: thus every
 * page that is a raw mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * and if that isn't true, the page has been COW'ed (in which case it
 * _does_ have a "struct page" associated with it even if it is in a
 * VM_PFNMAP range).
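 *
 * A small worked example of that rule (with made-up numbers): if a
 * driver remapped its buffer starting at pfn 0x100, then vm_pgoff is
 * 0x100, and the pte covering the third page of the vma is expected to
 * hold pfn 0x102; a pte there holding any other pfn must have been
 * COW'ed, and so does have a "struct page".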
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (unlikely(vma->vm_flags & VM_PFNMAP)) {
		unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
		if (pfn == vma->vm_pgoff + off)
			return NULL;
		if (!is_cow_mapping(vma->vm_flags))
			return NULL;
	}

	/*
	 * Add some anal sanity checks for now. Eventually,
	 * we should just do "return pfn_to_page(pfn)", but
	 * in the meantime we check that we get a valid pfn,
	 * and that the resulting page looks ok.
	 */
	if (unlikely(!pfn_valid(pfn))) {
		print_bad_pte(vma, pte, addr);
		return NULL;
	}

	/*
	 * NOTE! We still have PageReserved() pages in the page 
	 * tables. 
	 *
	 * The ZERO_PAGE() pages and various VDSO mappings can
	 * cause them to exist.
	 */
	return pfn_to_page(pfn);
}

/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline void
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		if (!pte_file(pte)) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			swap_duplicate(entry);
			/* make sure dst_mm is on swapoff's mmlist. */
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				if (list_empty(&dst_mm->mmlist))
					list_add(&dst_mm->mmlist,
						 &src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
			if (is_write_migration_entry(entry) &&
					is_cow_mapping(vm_flags)) {
				/*
				 * COW mappings require pages in both parent
				 * and child to be set to read.
				 */
				make_migration_entry_read(&entry);
				pte = swp_entry_to_pte(entry);
				set_pte_at(src_mm, addr, src_pte, pte);
			}
		}
		goto out_set_pte;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	page = vm_normal_page(vma, addr, pte);
	if (page) {
		get_page(page);
		page_dup_rmap(page);
		rss[!!PageAnon(page)]++;
	}

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
}

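/*
 * Copy one pte page's worth of [addr, end) from src_mm into dst_mm.
 * Both pte locks are held while copying, so the loop periodically bails
 * out (and restarts via the "again:" label) to keep latencies down.
 */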
static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[2];

again:
	rss[1] = rss[0] = 0;
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map_nested(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    need_lockbreak(src_ptl) ||
			    need_lockbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	spin_unlock(src_ptl);
	pte_unmap_nested(src_pte - 1);
	add_mm_rss(dst_mm, rss[0], rss[1]);
	pte_unmap_unlock(dst_pte - 1, dst_ptl);
	cond_resched();
	if (addr != end)
		goto again;
	return 0;
}

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}

int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
		if (!vma->anon_vma)
			return 0;
	}

	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
	return 0;
}

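/*
 * Tear down the ptes covering [addr, end) under one pte page: present
 * pages lose their rmap and are handed to the mmu_gather for freeing,
 * while swap entries have their swap reference dropped.  *zap_work is
 * decremented as we go so that callers can unmap in bounded chunks.
 */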
static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	pte_t *pte;
	spinlock_t *ptl;
	int file_rss = 0;
	int anon_rss = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent)) {
			(*zap_work)--;
			continue;
		}

		(*zap_work) -= PAGE_SIZE;

		if (pte_present(ptent)) {
			struct page *page;

			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page->mapping)
					continue;
				/*
				 * Each page->index must be checked when
				 * invalidating or truncating nonlinear.
				 */
				if (details->nonlinear_vma &&
				    (page->index < details->first_index ||
				     page->index > details->last_index))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;
			if (unlikely(details) && details->nonlinear_vma
			    && linear_page_index(details->nonlinear_vma,
						addr) != page->index)
				set_pte_at(mm, addr, pte,
					   pgoff_to_pte(page->index));
			if (PageAnon(page))
				anon_rss--;
			else {
				if (pte_dirty(ptent))
					set_page_dirty(page);
				if (pte_young(ptent))
					mark_page_accessed(page);
				file_rss--;
			}
			page_remove_rmap(page);
			tlb_remove_page(tlb, page);
			continue;
		}
		/*
		 * If details->check_mapping, we leave swap entries;
		 * if details->nonlinear_vma, we leave file entries.
		 */
		if (unlikely(details))
			continue;
		if (!pte_file(ptent))
			free_swap_and_cache(pte_to_swp_entry(ptent));
		pte_clear_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));

	add_mm_rss(mm, file_rss, anon_rss);
	pte_unmap_unlock(pte - 1, ptl);

	return addr;
}

static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pte_range(tlb, vma, pmd, addr, next,
						zap_work, details);
	} while (pmd++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pmd_range(tlb, vma, pud, addr, next,
						zap_work, details);
	} while (pud++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static unsigned long unmap_page_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	if (details && !details->check_mapping && !details->nonlinear_vma)
		details = NULL;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pud_range(tlb, vma, pgd, addr, next,
						zap_work, details);
	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
	tlb_end_vma(tlb, vma);

	return addr;
}

#ifdef CONFIG_PREEMPT
# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
#endif

/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.
 *
 * We aim to not hold locks for too long (for scheduling latency reasons).
 * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns.  So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather **tlbp,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	long zap_work = ZAP_BLOCK_SIZE;
	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
	int tlb_start_valid = 0;
	unsigned long start = start_addr;
	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
	int fullmm = (*tlbp)->fullmm;

	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;

		while (start != end) {
			if (!tlb_start_valid) {
				tlb_start = start;
				tlb_start_valid = 1;
			}

			if (unlikely(is_vm_hugetlb_page(vma))) {
				unmap_hugepage_range(vma, start, end);
				zap_work -= (end - start) /
						(HPAGE_SIZE / PAGE_SIZE);
				start = end;
			} else
				start = unmap_page_range(*tlbp, vma,
						start, end, &zap_work, details);

			if (zap_work > 0) {
				BUG_ON(start != end);
				break;
			}

			tlb_finish_mmu(*tlbp, tlb_start, start);

			if (need_resched() ||
				(i_mmap_lock && need_lockbreak(i_mmap_lock))) {
				if (i_mmap_lock) {
					*tlbp = NULL;
					goto out;
				}
				cond_resched();
			}

			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
			tlb_start_valid = 0;
			zap_work = ZAP_BLOCK_SIZE;
		}
	}
out:
	return start;	/* which is now the end (or restart) address */
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	if (tlb)
		tlb_finish_mmu(tlb, address, end);
	return end;
}

/*
 * Do a quick page-table lookup for a single page.
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		goto no_page_table;
	
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	if (pmd_huge(*pmd)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		goto out;
	}

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!ptep)
		goto out;

	pte = *ptep;
	if (!pte_present(pte))
		goto unlock;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;
	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page))
		goto unlock;

	if (flags & FOLL_GET)
		get_page(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		mark_page_accessed(page);
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate page tables.
	 */
	if (flags & FOLL_ANON) {
		page = ZERO_PAGE(address);
		if (flags & FOLL_GET)
			get_page(page);
		BUG_ON(flags & FOLL_WRITE);
	}
	return page;
}

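/*
 * Pin @len user pages of @mm starting at @start, faulting them in as
 * needed.  When @pages is non-NULL each page is returned with an extra
 * reference taken.  Returns the number of pages actually pinned, or a
 * negative errno if no page could be pinned at all.
 */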
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int len, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	unsigned int vm_flags;

	/* 
	 * Require read or write permissions.
	 * If 'force' is set, we only require the "MAY" flags.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;
		unsigned int foll_flags;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(tsk, start)) {
			unsigned long pg = start & PAGE_MASK;
			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;
			if (write) /* user gate pages are read-only */
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
			else
				pgd = pgd_offset_gate(mm, pg);
			BUG_ON(pgd_none(*pgd));
			pud = pud_offset(pgd, pg);
			BUG_ON(pud_none(*pud));
			pmd = pmd_offset(pud, pg);
			if (pmd_none(*pmd))
				return i ? : -EFAULT;
			pte = pte_offset_map(pmd, pg);
			if (pte_none(*pte)) {
				pte_unmap(pte);
				return i ? : -EFAULT;
			}
			if (pages) {
				struct page *page = vm_normal_page(gate_vma, start, *pte);
				pages[i] = page;
				if (page)
					get_page(page);
			}
			pte_unmap(pte);
			if (vmas)
				vmas[i] = gate_vma;
			i++;
			start += PAGE_SIZE;
			len--;
			continue;
		}

		if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
				|| !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &len, i);
			continue;
		}

		foll_flags = FOLL_TOUCH;
		if (pages)
			foll_flags |= FOLL_GET;
		if (!write && !(vma->vm_flags & VM_LOCKED) &&
		    (!vma->vm_ops || !vma->vm_ops->nopage))
			foll_flags |= FOLL_ANON;

		do {
			struct page *page;

			if (write)
				foll_flags |= FOLL_WRITE;

			cond_resched();
			while (!(page = follow_page(vma, start, foll_flags))) {
				int ret;
				ret = __handle_mm_fault(mm, vma, start,
						foll_flags & FOLL_WRITE);
				/*
				 * The VM_FAULT_WRITE bit tells us that do_wp_page has
				 * broken COW when necessary, even if maybe_mkwrite
				 * decided not to set pte_write. We can thus safely do
				 * subsequent page lookups as if they were reads.
				 */
				if (ret & VM_FAULT_WRITE)
					foll_flags &= ~FOLL_WRITE;
				
				switch (ret & ~VM_FAULT_WRITE) {
				case VM_FAULT_MINOR:
					tsk->min_flt++;
					break;
				case VM_FAULT_MAJOR:
					tsk->maj_flt++;
					break;
				case VM_FAULT_SIGBUS:
					return i ? i : -EFAULT;
				case VM_FAULT_OOM:
					return i ? i : -ENOMEM;
				default:
					BUG();
				}
			}
			if (pages) {
				pages[i] = page;

				flush_anon_page(page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			len--;
		} while (len && start < vma->vm_end);
	} while (len);
	return i;
}
EXPORT_SYMBOL(get_user_pages);

static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = ZERO_PAGE(addr);
		pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
		page_cache_get(page);
		page_add_file_rmap(page);
		inc_mm_counter(mm, file_rss);
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, zero_pte);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (zeromap_pte_range(mm, pmd, addr, next, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (zeromap_pmd_range(mm, pud, addr, next, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

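/*
 * Map the (write-protected) ZERO_PAGE over [addr, addr + size) in
 * @vma's address space.
 */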
int zeromap_page_range(struct vm_area_struct *vma,
			unsigned long addr, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + size;
	struct mm_struct *mm = vma->vm_mm;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = zeromap_pud_range(mm, pgd, addr, next, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	return err;
}

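/*
 * Walk down to the pte mapping @addr, allocating intermediate levels as
 * needed, and return it mapped and locked (with the lock in *ptl), or
 * NULL if an allocation failed.  The caller unlocks with
 * pte_unmap_unlock().
 */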
pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
{
	pgd_t * pgd = pgd_offset(mm, addr);
	pud_t * pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd_t * pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			return pte_alloc_map_lock(mm, pmd, addr, ptl);
	}
	return NULL;
}

/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages. Only
 * old drivers should use this, and they needed to mark their
 * pages reserved for the old functions anyway.
 */
static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
{
	int retval;
	pte_t *pte;
	spinlock_t *ptl;  

	retval = -EINVAL;
	if (PageAnon(page))
		goto out;
	retval = -ENOMEM;
	flush_dcache_page(page);
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	get_page(page);
	inc_mm_counter(mm, file_rss);
	page_add_file_rmap(page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));

	retval = 0;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

/**
 * vm_insert_page - insert single page into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @page: source kernel page
 *
 * This allows drivers to insert individual pages they've allocated
 * into a user vma.
 *
 * The page has to be a nice clean _individual_ kernel allocation.
 * If you allocate a compound page, you need to have marked it as
 * such (__GFP_COMP), or manually just split the page up yourself
 * (see split_page()).
 *
 * NOTE! Traditionally this was done with "remap_pfn_range()" which
 * took an arbitrary page protection parameter. This doesn't allow
 * that. Your vma protection will have to be set up correctly, which
 * means that if you want a shared writable mapping, you'd better
 * ask for a shared writable mapping!
 *
 * The page does not need to be reserved.
 */
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
{
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (!page_count(page))
		return -EINVAL;
	vma->vm_flags |= VM_INSERTPAGE;
	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);

/*
 * Maps a range of physical memory into the requested pages. The old
 * mappings are removed. Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (remap_pte_range(mm, pmd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (remap_pmd_range(mm, pud, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/**
 * remap_pfn_range - remap kernel memory to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: page frame number of kernel physical memory
 * @size: size of map area
 * @prot: page protection flags for this mapping
 *
 *  Note: this is only safe if the mm semaphore is held when called.
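 *
 * A typical (illustrative) call from a driver's ->mmap() method, where
 * "phys" stands for whatever physical address the driver wants to
 * expose (it is not a name used elsewhere in this file):
 *
 *	remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
 *			vma->vm_end - vma->vm_start, vma->vm_page_prot);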
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
	struct mm_struct *mm = vma->vm_mm;
	int err;

	/*
	 * Physically remapped pages are special. Tell the
	 * rest of the world about it:
	 *   VM_IO tells people not to look at these pages
	 *	(accesses can have side effects).
	 *   VM_RESERVED is specified all over the place, because
	 *	in 2.4 it kept swapout's vma scan off this vma; but
	 *	in 2.6 the LRU scan won't even find its pages, so this
	 *	flag means no more than count its pages in reserved_vm,
	 * 	and omit it from core dump, even when VM_IO turned off.
	 *   VM_PFNMAP tells the core MM that the base pages are just
	 *	raw PFN mappings, and do not have a "struct page" associated
	 *	with them.
	 *
	 * There's a horrible special case to handle copy-on-write
	 * behaviour that some programs depend on. We mark the "original"
	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
	 */
	if (is_cow_mapping(vma->vm_flags)) {
		if (addr != vma->vm_start || end != vma->vm_end)
			return -EINVAL;
		vma->vm_pgoff = pfn;
	}

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_pud_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	return err;
}
EXPORT_SYMBOL(remap_pfn_range);

/*
 * handle_pte_fault chooses page fault handler according to an entry
 * which was read non-atomically.  Before making any commitment, on
 * those architectures or configurations (e.g. i386 with PAE) which
 * might give a mix of unmatched parts, do_swap_page and do_file_page
 * must check under lock before unmapping the pte and proceeding
 * (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page and do_no_page can safely check later on).
 */