/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>

#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory.  This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));

	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));

	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};
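
/*
 * Each ratio above acts as a divisor: a lower zone holds back roughly
 * (pages in the zones above it) / ratio pages from allocations that could
 * also have been satisfied from a higher zone, as the worked examples in
 * the comment above show (e.g. 784M/256 kept back in ZONE_DMA).
 */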

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page_badflags(page, reason, bad_flags);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All tail pages have their ->first_page
 * pointing at the head page.
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->first_page = page;
		/* Make sure p->first_page is always valid for PageTail() */
		smp_wmb();
		__SetPageTail(p);
	}
}

/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order)) {
		bad_page(page, "wrong compound order", 0);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p))) {
			bad_page(page, "PageTail not set", 0);
			bad++;
		} else if (unlikely(p->first_page != page)) {
			bad_page(page, "first_page not consistent", 0);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard_flag(struct page *page)
{
	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}

static inline void clear_page_guard_flag(struct page *page)
{
	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline void set_page_guard_flag(struct page *page) { }
static inline void clear_page_guard_flag(struct page *page) { }
#endif

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1 << order);
}
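
/*
 * Worked example for the formulas above: at order 1, pages #8 and #10 are
 * buddies (8 ^ (1 << 1) == 10), and both share the order-2 parent
 * P = 8 & ~(1 << 1) = 10 & ~(1 << 1) = 8, which is also what
 * buddy_idx & page_idx evaluates to when the two are merged in
 * __free_one_page() below.
 */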

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce with. A page and its buddy can be coalesced if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
 * field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;

	VM_BUG_ON(!zone_is_initialized(zone));

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);

	page_idx = pfn & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < MAX_ORDER-1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard_flag(buddy);
			set_page_private(page, 0);
			__mod_zone_freepage_state(zone, 1 << order,
						  migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}
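
/*
 * Example of the merge loop above: freeing page_idx 8 at order 0 while
 * page 9 is already free merges them into an order-1 block at idx 8;
 * if the order-1 buddy at idx 10 is free as well, the result is an
 * order-2 block at idx 8, and so on up to MAX_ORDER-1.
 */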

static inline int free_pages_check(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
	if (unlikely(mem_cgroup_bad_page_check(page)))
		bad_reason = "cgroup check failed";
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	page_cpupid_reset_last(page);
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;

	spin_lock(&zone->lock);
	zone->pages_scanned = 0;

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			mt = get_freepage_migratetype(page);
			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
			if (likely(!is_migrate_isolate_page(page))) {
				__mod_zone_page_state(zone, NR_FREE_PAGES, 1);
				if (is_migrate_cma(mt))
					__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
			}
		} while (--to_free && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	zone->pages_scanned = 0;

	__free_one_page(page, pfn, zone, order, migratetype);
	if (unlikely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);
	spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	for (i = 0; i < (1 << order); i++)
		bad += free_pages_check(page + i);
	if (bad)
		return false;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order))
		return;

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	migratetype = get_pfnblock_migratetype(page, pfn);
	set_freepage_migratetype(page, migratetype);
	free_one_page(page_zone(page), page, pfn, order, migratetype);
	local_irq_restore(flags);
}

void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_page_refcounted(page);
	set_pageblock_migratetype(page, MIGRATE_CMA);
	__free_pages(page, pageblock_order);
	adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

#ifdef CONFIG_DEBUG_PAGEALLOC
		if (high < debug_guardpage_minorder()) {
			/*
			 * Mark as guard pages (or page), that will allow to
			 * merge back to allocator when buddy will be freed.
			 * Corresponding page table entries will not be touched,
			 * pages will stay not present in virtual address space
			 */
			INIT_LIST_HEAD(&page[size].lru);
			set_page_guard_flag(&page[size]);
			set_page_private(&page[size], high);
			/* Guard pages are not available for any usage */
			__mod_zone_freepage_state(zone, -(1 << high),
						  migratetype);
			continue;
		}
#endif
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
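
/*
 * Example: satisfying an order-0 request from an order-3 free block splits
 * the block three times; the unused halves go back on the free lists as one
 * order-2, one order-1 and one order-0 buddy, and the remaining order-0
 * page is handed to the caller.
 */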

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
	}
	if (unlikely(mem_cgroup_bad_page_check(page)))
		bad_reason = "cgroup check failed";
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (unlikely(check_new_page(p)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		set_freepage_migratetype(page, migratetype);
		return page;
	}

	return NULL;
}
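
/*
 * Callers of __rmqueue_smallest() get the smallest free block of at least
 * the requested order from the requested migratetype's lists; expand()
 * above returns any unused remainder of a larger block to the free lists.
 */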


/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
#ifdef CONFIG_CMA
	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
#else
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
#endif
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
#ifdef CONFIG_MEMORY_ISOLATION
	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
#endif
};
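
/*
 * For example, when no MIGRATE_UNMOVABLE page is free, the allocator steals
 * from MIGRATE_RECLAIMABLE, then MIGRATE_MOVABLE, and finally from the
 * MIGRATE_RESERVE pool, in that order.
 */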

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(