/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory.  This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};
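
/*
 * Worked example of the ratios above (illustrative only, using the 1G
 * machine from the comment before the array): with ratio 256, a NORMAL
 * allocation keeps 784M/256 (~3M) of ZONE_DMA in reserve; with ratio 32,
 * a HIGHMEM allocation keeps 224M/32 (7M) of ZONE_NORMAL and
 * (224M+784M)/256 (~4M) of ZONE_DMA in reserve.
 */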

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

/*
 * NOTE:
 * Don't use set_pageblock_migratetype(page, MIGRATE_ISOLATE) directly.
 * Instead, use {un}set_pageblock_isolate.
 */
void set_pageblock_migratetype(struct page *page, int migratetype)
{

	if (unlikely(page_group_by_mobility_disabled))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		reset_page_mapcount(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	reset_page_mapcount(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All tail pages have their ->first_page
 * pointing at the head page.
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		__SetPageTail(p);
		set_page_count(p, 0);
		p->first_page = page;
	}
}
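
/*
 * Example (illustrative): prep_compound_page(page, 2) marks page[0] as the
 * head of an order-2 compound page and page[1..3] as tails, each with a
 * zero refcount and ->first_page pointing back at page[0].
 */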

/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order) ||
	    unlikely(!PageHead(page))) {
		bad_page(page);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) || (p->first_page != page))) {
			bad_page(page);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard_flag(struct page *page)
{
	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}

static inline void clear_page_guard_flag(struct page *page)
{
	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline void set_page_guard_flag(struct page *page) { }
static inline void clear_page_guard_flag(struct page *page) { }
#endif
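
/*
 * Usage example (illustrative): booting with "debug_guardpage_minorder=1"
 * makes expand() below turn the order-0 halves it splits off into guard
 * pages instead of returning them to the free lists.
 */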

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1 << order);
}
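
/*
 * Illustration of the equations above (hypothetical indexes): for
 * page_idx 8 at order 1, __find_buddy_index() returns 8 ^ (1 << 1) = 10,
 * and the combined order-2 parent starts at combined_idx = 10 & 8 = 8.
 */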

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount -2.
 * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		VM_BUG_ON(page_count(buddy) != 0);
		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with _mapcount -2. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & ((1 << order) - 1));
	VM_BUG_ON(bad_range(zone, page));

	while (order < MAX_ORDER-1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard_flag(buddy);
			set_page_private(page, 0);
			__mod_zone_freepage_state(zone, 1 << order,
						  migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case,
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}
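
/*
 * Merge example (hypothetical indexes): freeing order-0 page_idx 8 while
 * idx 9 is a free order-0 buddy produces an order-1 block at idx 8; if
 * idx 10-11 already form a free order-1 block, the loop above merges once
 * more into an order-2 block at idx 8 before it is put on a free list.
 */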

/*
 * free_page_mlock() -- clean up attempts to free an mlocked() page.
 * Page should not be on lru, so no need to fix that up.
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
	__dec_zone_page_state(page, NR_MLOCK);
	__count_vm_event(UNEVICTABLE_MLOCKFREED);
}

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(atomic_read(&page->_count) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
		(mem_cgroup_bad_page_check(page)))) {
		bad_page(page);
		return 1;
	}
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;

	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			mt = get_freepage_migratetype(page);
			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
			if (is_migrate_cma(mt))
				__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
		} while (--to_free && --batch_free && !list_empty(list));
	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
	spin_unlock(&zone->lock);
}
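
/*
 * Round-robin example (illustrative): starting a pass with batch_free == 0
 * and an empty MIGRATE_UNMOVABLE pcp list, the scan above skips it,
 * batch_free reaches 2 by the time a non-empty MIGRATE_RECLAIMABLE list is
 * found, so up to two pages are freed from that fuller list in one pass.
 */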

static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;

	__free_one_page(page, zone, order, migratetype);
	if (unlikely(migratetype != MIGRATE_ISOLATE))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);
	spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	for (i = 0; i < (1 << order); i++)
		bad += free_pages_check(page + i);
	if (bad)
		return false;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int wasMlocked = __TestClearPageMlocked(page);

	if (!free_pages_prepare(page, order))
		return;

	local_irq_save(flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order,
					get_pageblock_migratetype(page));
	local_irq_restore(flags);
}

void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	unsigned int loop;

	prefetchw(page);
	for (loop = 0; loop < nr_pages; loop++) {
		struct page *p = &page[loop];

		if (loop + 1 < nr_pages)
			prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}

	set_page_refcounted(page);
	__free_pages(page, order);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_page_refcounted(page);
	set_pageblock_migratetype(page, MIGRATE_CMA);
	__free_pages(page, pageblock_order);
	totalram_pages += pageblock_nr_pages;
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));

#ifdef CONFIG_DEBUG_PAGEALLOC
		if (high < debug_guardpage_minorder()) {
			/*
			 * Mark as guard pages (or page), that will allow to
			 * merge back to allocator when buddy will be freed.
			 * Corresponding page table entries will not be touched,
			 * pages will stay not present in virtual address space
			 */
			INIT_LIST_HEAD(&page[size].lru);
			set_page_guard_flag(&page[size]);
			set_page_private(&page[size], high);
			/* Guard pages are not available for any usage */
			__mod_zone_freepage_state(zone, -(1 << high),
						  migratetype);
			continue;
		}
#endif
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
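
/*
 * Splitting example (illustrative): expand(zone, page, 0, 3, area, mt) on
 * an order-3 block puts the upper order-2, order-1 and order-0 pieces
 * (page[4], page[2] and page[1]) back on the free lists and leaves page[0]
 * as the order-0 page handed back to the caller.
 */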

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(atomic_read(&page->_count) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
		(mem_cgroup_bad_page_check(page)))) {
		bad_page(page);
		return 1;
	}
	return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (unlikely(check_new_page(p)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area * area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order in which lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
#ifdef CONFIG_CMA
	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
#else
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
#endif
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
};
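
/*
 * Example of the table above: a MIGRATE_UNMOVABLE request whose own free
 * lists are empty tries MIGRATE_RECLAIMABLE, then MIGRATE_MOVABLE, then
 * MIGRATE_RESERVE; with CONFIG_CMA, movable requests may also fall back
 * to MIGRATE_CMA pageblocks, but no other type ever falls back to CMA.
 */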

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}
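
/*
 * Alignment example (hypothetical numbers): with pageblock_nr_pages == 1024,
 * a page at pfn 5000 is rounded down to start_pfn 4096, so the pageblock
 * spanning pfns 4096-5119 is handed to move_freepages() unless the zone
 * boundary checks above bail out first.
 */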

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
	struct free_area * area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0;; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				break;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 *
			 * On the other hand, never change migration
			 * type of MIGRATE_CMA pageblocks nor move CMA
			 * pages on different free lists. We don't
			 * want unmovable pages to be allocated from
			 * MIGRATE_CMA areas.
			 */
			if (!is_migrate_cma(migratetype) &&
			    (unlikely(current_order >= pageblock_order / 2) ||
			     start_migratetype == MIGRATE_RECLAIMABLE ||
			     page_group_by_mobility_disabled)) {
				int pages;
				pages = move_freepages_block(zone, page,
								start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (pageblock_order-1)) ||
						page_group_by_mobility_disabled)
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);

			/* Take ownership for orders >= pageblock_order */