page_alloc.c 182 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
22
#include <linux/jiffies.h>
Linus Torvalds's avatar
Linus Torvalds committed
23
#include <linux/bootmem.h>
24
#include <linux/memblock.h>
Linus Torvalds's avatar
Linus Torvalds committed
25
#include <linux/compiler.h>
26
#include <linux/kernel.h>
27
#include <linux/kmemcheck.h>
Linus Torvalds's avatar
Linus Torvalds committed
28 29 30 31 32
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
33
#include <linux/ratelimit.h>
34
#include <linux/oom.h>
Linus Torvalds's avatar
Linus Torvalds committed
35 36 37 38 39
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
40
#include <linux/memory_hotplug.h>
Linus Torvalds's avatar
Linus Torvalds committed
41 42
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
43
#include <linux/vmstat.h>
44
#include <linux/mempolicy.h>
45
#include <linux/stop_machine.h>
46 47
#include <linux/sort.h>
#include <linux/pfn.h>
48
#include <linux/backing-dev.h>
49
#include <linux/fault-inject.h>
50
#include <linux/page-isolation.h>
51
#include <linux/debugobjects.h>
52
#include <linux/kmemleak.h>
53
#include <linux/compaction.h>
54
#include <trace/events/kmem.h>
55
#include <linux/prefetch.h>
56
#include <linux/mm_inline.h>
57
#include <linux/migrate.h>
58
#include <linux/page-debug-flags.h>
59
#include <linux/hugetlb.h>
60
#include <linux/sched/rt.h>
Linus Torvalds's avatar
Linus Torvalds committed
61

62
#include <asm/sections.h>
Linus Torvalds's avatar
Linus Torvalds committed
63
#include <asm/tlbflush.h>
64
#include <asm/div64.h>
Linus Torvalds's avatar
Linus Torvalds committed
65 66
#include "internal.h"

67 68
/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
69
#define MIN_PERCPU_PAGELIST_FRACTION	(8)
70

71 72 73 74 75
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

76 77 78 79 80 81 82 83 84
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
85
int _node_numa_mem_[MAX_NUMNODES];
86 87
#endif

Linus Torvalds's avatar
Linus Torvalds committed
88
/*
 * Array of node states.
 *
 * Statically seeded so node 0 is online (and, on !NUMA builds, assumed
 * to hold all memory and CPUs) before the boot code populates the masks.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

107 108 109
/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

110
unsigned long totalram_pages __read_mostly;
111
unsigned long totalreserve_pages __read_mostly;
112 113 114 115 116 117 118 119
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory.  This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

120
int percpu_pagelist_fraction;
121
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
Linus Torvalds's avatar
Linus Torvalds committed
122

123 124 125 126 127 128 129 130 131
#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

/* Saved copy of gfp_allowed_mask; 0 means "nothing saved". */
static gfp_t saved_gfp_mask;

/*
 * Undo a previous pm_restrict_gfp_mask().  No-op if nothing was saved,
 * so it is safe to call on resume paths that did not restrict.
 */
void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

/*
 * Save the current gfp_allowed_mask and strip __GFP_IO/__GFP_FS so that
 * allocations performed while devices are suspended cannot start I/O.
 * WARN_ON(saved_gfp_mask) catches nested restriction without a restore.
 */
void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}
151 152 153 154 155 156 157

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
		return false;
	return true;
}
158 159
#endif /* CONFIG_PM_SLEEP */

160 161 162 163
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

164
static void __free_pages_ok(struct page *page, unsigned int order);
165

Linus Torvalds's avatar
Linus Torvalds committed
166 167 168 169 170 171 172
/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
/* One ratio per lower zone; entries follow the configured zone order. */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};
Linus Torvalds's avatar
Linus Torvalds committed
189 190 191

EXPORT_SYMBOL(totalram_pages);

192
/* Human-readable zone names, indexed by zone number in configured order. */
static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

Linus Torvalds's avatar
Linus Torvalds committed
206
int min_free_kbytes = 1024;
207
int user_min_free_kbytes = -1;
Linus Torvalds's avatar
Linus Torvalds committed
208

209 210
static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
211
static unsigned long __meminitdata dma_reserve;
Linus Torvalds's avatar
Linus Torvalds committed
212

Tejun Heo's avatar
Tejun Heo committed
213 214 215 216 217 218 219 220 221 222 223
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
224

Miklos Szeredi's avatar
Miklos Szeredi committed
225 226
#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
227
int nr_online_nodes __read_mostly = 1;
Miklos Szeredi's avatar
Miklos Szeredi committed
228
EXPORT_SYMBOL(nr_node_ids);
229
EXPORT_SYMBOL(nr_online_nodes);
Miklos Szeredi's avatar
Miklos Szeredi committed
230 231
#endif

232 233
int page_group_by_mobility_disabled __read_mostly;

234
/*
 * Record @migratetype in the pageblock bitmap covering @page.
 *
 * When grouping by mobility is disabled, all non-PCP migratetypes are
 * collapsed to MIGRATE_UNMOVABLE before being stored.
 */
void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

244 245
bool oom_killer_disabled __read_mostly;

Nick Piggin's avatar
Nick Piggin committed
246
#ifdef CONFIG_DEBUG_VM
247
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
Linus Torvalds's avatar
Linus Torvalds committed
248
{
249 250 251
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
252
	unsigned long sp, start_pfn;
253

254 255
	do {
		seq = zone_span_seqbegin(zone);
256 257
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
258
		if (!zone_spans_pfn(zone, pfn))
259 260 261
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

262
	if (ret)
263 264 265
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);
266

267
	return ret;
268 269 270 271
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
272
	if (!pfn_valid_within(page_to_pfn(page)))
273
		return 0;
Linus Torvalds's avatar
Linus Torvalds committed
274
	if (zone != page_zone(page))
275 276 277 278 279 280 281 282 283 284
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
Linus Torvalds's avatar
Linus Torvalds committed
285
		return 1;
286 287 288
	if (!page_is_consistent(zone, page))
		return 1;

Linus Torvalds's avatar
Linus Torvalds committed
289 290
	return 0;
}
Nick Piggin's avatar
Nick Piggin committed
291 292 293 294 295 296 297
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

298 299
/*
 * Report a page with corrupted state and taint the kernel.
 *
 * @reason/@bad_flags describe what failed the sanity check; they are
 * forwarded to dump_page_badflags().  Output is rate limited by the
 * static counters below: a burst of 60 reports, then silence for a
 * minute (suppressed reports are counted and summarised later).
 * In all cases the page's _mapcount is reset so a stray PageBuddy
 * marker cannot confuse the allocator.
 */
static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page_badflags(page, reason, bad_flags);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All tail pages have their ->first_page
 * pointing at the head page.
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

/* Destructor installed on compound pages: free the whole order at once. */
static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

/*
 * Turn a freshly allocated order-@order run of pages into a compound page:
 * mark the head, then link every tail page back to it.
 */
void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->first_page = page;
		/* Make sure p->first_page is always valid for PageTail() */
		smp_wmb();
		__SetPageTail(p);
	}
}

381
/* update __split_huge_page_refcount if you change this function */
/*
 * Undo prep_compound_page() before freeing: clear the head/tail markers
 * and sanity-check each tail page on the way.
 *
 * Returns the number of inconsistencies found (0 means the compound page
 * was well formed); each problem is also reported via bad_page().
 */
static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order)) {
		bad_page(page, "wrong compound order", 0);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p))) {
			bad_page(page, "PageTail not set", 0);
			bad++;
		} else if (unlikely(p->first_page != page)) {
			bad_page(page, "first_page not consistent", 0);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

411 412
/*
 * Zero every page in an order-@order allocation on behalf of __GFP_ZERO.
 * Must not be called from interrupt context for highmem pages, since
 * clear_highpage() may need a kmap slot.
 */
static inline void prep_zero_page(struct page *page, unsigned int order,
							gfp_t gfp_flags)
{
	int nr_pages = 1 << order;
	int idx;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (idx = 0; idx < nr_pages; idx++)
		clear_highpage(page + idx);
}

425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441
#ifdef CONFIG_DEBUG_PAGEALLOC
/* Minimum order below which free pages become guard pages (0 = disabled). */
unsigned int _debug_guardpage_minorder;

/*
 * Parse the debug_guardpage_minorder= boot parameter.  Values above
 * MAX_ORDER/2 are rejected so at least half the order range stays
 * usable for real allocations.
 */
static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

/*
 * Mark an order-@order run as a guard area: flag it, record its order in
 * page_private, and subtract it from the zone's free-page accounting so
 * it cannot be handed out.
 */
static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
}

/*
 * Undo set_page_guard() and return the pages to the zone's free-page
 * accounting (isolated pageblocks are not counted as free).
 */
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
/* !CONFIG_DEBUG_PAGEALLOC: guard pages compile to nothing. */
static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

467
/*
 * Mark @page as a free buddy block of the given order: the order is
 * stashed in page_private and PageBuddy is set (via _mapcount).
 */
static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

/* Inverse of set_page_order(): the page leaves the buddy free lists. */
static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy
 * we can do coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	/* Debug guard pages count as mergeable buddies of matching order. */
	if (page_is_guard(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
 * field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

/* Caller must hold zone->lock (see free_one_page()/free_pcppages_bulk()). */
static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;
	int max_order = MAX_ORDER;

	VM_BUG_ON(!zone_is_initialized(zone));

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);
	if (is_migrate_isolate(migratetype)) {
		/*
		 * We restrict max order of merging to prevent merge
		 * between freepages on isolate pageblock and normal
		 * pageblock. Without this, pageblock isolation
		 * could cause incorrect freepage accounting.
		 */
		max_order = min(MAX_ORDER, pageblock_order + 1);
	} else {
		__mod_zone_freepage_state(zone, 1 << order, migratetype);
	}

	page_idx = pfn & ((1 << max_order) - 1);

	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	/* Merge with equally sized free buddies as far up as possible. */
	while (order < max_order - 1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case,
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page
	 */
	/*
	 * NOTE(review): buddy/buddy_idx here are the values left over from
	 * the merge loop above (uninitialized_var suppresses the warning
	 * for buddy_idx) — verify the loop always ran at least once when
	 * order < MAX_ORDER-2 holds.
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

634
/*
 * Sanity-check a page about to be freed.
 *
 * Returns 1 (after reporting via bad_page()) if the page still has a
 * mapcount, a mapping, a non-zero refcount, flags that must be clear at
 * free time, or (with CONFIG_MEMCG) a memcg charge.  Returns 0 and
 * scrubs the prep-time flags and cpupid when the page is clean.
 */
static inline int free_pages_check(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	page_cpupid_reset_last(page);
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;	/* which pcp list we are draining */
	int batch_free = 0;	/* pages to take from the current list */
	int to_free = count;
	unsigned long nr_scanned;

	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			mt = get_freepage_migratetype(page);
			if (unlikely(has_isolate_pageblock(zone)))
				mt = get_pageblock_migratetype(page);

			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
		} while (--to_free && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}

727 728
/*
 * Free a single order-@order block directly to the buddy allocator,
 * taking zone->lock and resetting the zone's pages-scanned counter.
 *
 * If any pageblock in the zone is isolated (or the caller's migratetype
 * says so), the migratetype is re-read from the pageblock bitmap so the
 * page lands on the correct free list.
 */
static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype)
{
	unsigned long nr_scanned;
	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

746
/*
 * Common preparation before a block of pages is returned to the
 * allocator: tracing, kmemcheck shadow release, per-page sanity checks,
 * debug-object/lock checks and unmapping from the kernel page tables.
 *
 * Returns false if any page in the block failed free_pages_check(),
 * in which case the block must NOT be freed.
 */
static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	for (i = 0; i < (1 << order); i++)
		bad += free_pages_check(page + i);
	if (bad)
		return false;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

/*
 * Free an order-@order block straight to the buddy allocator (bypassing
 * the per-cpu lists).  Runs the free preparation checks first, then
 * hands the block to free_one_page() with interrupts disabled.
 */
static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	set_freepage_migratetype(page, migratetype);
	free_one_page(page_zone(page), page, pfn, order, migratetype);
	local_irq_restore(flags);
}

793
/*
 * Release an order-@order block of bootmem-reserved pages to the buddy
 * allocator during early boot: clear PG_reserved and zero the refcount
 * on every page (prefetching the next struct page as we go), credit the
 * zone's managed page count, then free the block normally.
 */
void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	/* Last page of the block, handled outside the prefetch loop. */
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}

813
#ifdef CONFIG_CMA
814
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
815 816 817 818 819 820 821 822 823 824 825
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);
826 827 828 829 830 831 832 833 834 835 836 837 838 839

	if (pageblock_order >= MAX_ORDER) {
		i = pageblock_nr_pages;
		p = page;
		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

840
	adjust_managed_page_count(page, pageblock_nr_pages);
841 842
}
#endif
Linus Torvalds's avatar
Linus Torvalds committed
843 844 845 846 847 848 849 850 851 852 853 854 855

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
856
 * -- nyc
Linus Torvalds's avatar
Linus Torvalds committed
857
 */
Nick Piggin's avatar
Nick Piggin committed
858
static inline void expand(struct zone *zone, struct page *page,
859 860
	int low, int high, struct free_area *area,
	int migratetype)
Linus Torvalds's avatar
Linus Torvalds committed
861 862 863 864 865 866 867
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
868
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
869

870 871
		if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
			high < debug_guardpage_minorder()) {
872 873 874 875 876 877
			/*
			 * Mark as guard pages (or page), that will allow to
			 * merge back to allocator when buddy will be freed.
			 * Corresponding page table entries will not be touched,
			 * pages will stay not present in virtual address space
			 */
878
			set_page_guard(zone, &page[size], high, migratetype);
879 880
			continue;
		}
881
		list_add(&page[size].lru, &area->free_list[migratetype]);
Linus Torvalds's avatar