/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/page_owner.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory.  This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */
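
/*
 * Illustrative call sequence (a sketch; the real callers live in
 * kernel/power/): the suspend path takes pm_mutex, then
 *
 *	pm_restrict_gfp_mask();    allocations lose __GFP_IO/__GFP_FS here
 *	...suspend devices, write or read the hibernation image...
 *	pm_restore_gfp_mask();
 *
 * so no allocation can start block I/O while devices are asleep.
 */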

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
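/*
 * Worked arithmetic for the ratios above (illustrative): with ratio 256
 * for ZONE_DMA, a NORMAL allocation on the 1G machine leaves
 * 784M/256 ~= 3M of DMA memory reserved for DMA-only users; with ratio
 * 32, a HIGHMEM allocation leaves 224M/32 = 7M of NORMAL reserved.
 * setup_per_zone_lowmem_reserve() turns these ratios into the per-zone
 * lowmem_reserve[] arrays consulted by the watermark checks.
 */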
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page_badflags(page, reason, bad_flags);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All tail pages have their ->first_page
 * pointing at the head page.
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->first_page = page;
		/* Make sure p->first_page is always valid for PageTail() */
		smp_wmb();
		__SetPageTail(p);
	}
}
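
/*
 * Illustrative sketch (not kernel code): after prep_compound_page(page, 2)
 * on an order-2 block, the four struct pages are wired up as
 *
 *	page[0]:     PG_head set; dtor and order stashed in page[1]
 *	page[1]:     PG_tail set, ->first_page == page,
 *	             ->lru.next == free_compound_page, ->lru.prev == 2
 *	page[2..3]:  PG_tail set, ->first_page == page, refcount 0
 *
 * so PageTail()/->first_page reach the head from any tail, matching the
 * "compound pages" comment above.
 */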

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly;
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		_debug_pagealloc_enabled = true;

	return 0;
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
	/* If we don't use debug_pagealloc, we don't need guard page */
	if (!debug_pagealloc_enabled())
		return false;

	return true;
}

static void init_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return;

	_debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops = { NULL, };
static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce. A page and its buddy can be coalesced if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
547 548 549
 * free pages of length of (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
 * field.
Linus Torvalds's avatar
Linus Torvalds committed
550
 * So when we are allocating or freeing one, we can derive the state of the
551 552
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
Linus Torvalds's avatar
Linus Torvalds committed
553
 * If a block is freed, and its buddy is also free, then this
554
 * triggers coalescing into a block of larger size.
Linus Torvalds's avatar
Linus Torvalds committed
555
 *
556
 * -- nyc
Linus Torvalds's avatar
Linus Torvalds committed
557 558
 */
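
/*
 * Worked example (illustrative): __find_buddy_index() in mm/internal.h
 * reduces to page_idx ^ (1 << order), so freeing an order-0 page at
 * page_idx 12 in __free_one_page() below proceeds as
 *
 *	order 0: buddy_idx = 12 ^ 1 = 13; if page 13 is free at order 0,
 *	         merge: combined_idx = 12, continue at order 1
 *	order 1: buddy_idx = 12 ^ 2 = 14; if free, combined_idx = 12
 *	order 2: buddy_idx = 12 ^ 4 = 8;  if free, combined_idx = 8 and
 *	         pages 8..15 now form a single order-3 block
 *
 * The loop stops as soon as page_is_buddy() fails or order reaches
 * max_order - 1.
 */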

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;
	int max_order = MAX_ORDER;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (is_migrate_isolate(migratetype)) {
		/*
		 * We restrict max order of merging to prevent merge
		 * between freepages on isolate pageblock and normal
		 * pageblock. Without this, pageblock isolation
		 * could cause incorrect freepage accounting.
		 */
		max_order = min(MAX_ORDER, pageblock_order + 1);
	} else {
		__mod_zone_freepage_state(zone, 1 << order, migratetype);
	}

	page_idx = pfn & ((1 << max_order) - 1);

	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < max_order - 1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. If that is
	 * happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	page_cpupid_reset_last(page);
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;
	unsigned long nr_scanned;

	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			mt = get_freepage_migratetype(page);
			if (unlikely(has_isolate_pageblock(zone)))
				mt = get_pageblock_migratetype(page);

			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
		} while (--to_free && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}
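
/*
 * Illustrative trace of the round-robin above (hypothetical list sizes):
 * with count = 4 and the three pcp lists holding 2, 0 and many pages,
 * the selection loop skips the empty list (bumping batch_free), frees
 * two pages from the fullest list, one from the two-page list, then one
 * more from the fullest list. Emptier lists thus give up fewer pages
 * per pass than fuller ones.
 */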

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype)
{
	unsigned long nr_scanned;
	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return 0;
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set", 0);
		return 1;
	}
	if (unlikely(page->first_page != head_page)) {
		bad_page(page, "first_page not consistent", 0);
		return 1;
	}
	return 0;
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	bool compound = PageCompound(page);
	int i, bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);
	kasan_free_pages(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	bad += free_pages_check(page);
	for (i = 1; i < (1 << order); i++) {
		if (compound)
			bad += free_tail_pages_check(page, page + i);
		bad += free_pages_check(page + i);
	}
	if (bad)
		return false;

	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	set_freepage_migratetype(page, migratetype);
	free_one_page(page_zone(page), page, pfn, order, migratetype);
	local_irq_restore(flags);
}

void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		i = pageblock_nr_pages;
		p = page;
		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

	adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif
Linus Torvalds's avatar
/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
884
 * -- nyc
 */
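
/*
 * Worked example (illustrative): satisfying an order-0 request from an
 * order-3 free block ends up in expand(zone, page, 0, 3, area, migratetype),
 * which with size = 8 hands back
 *
 *	page[4..7] as an order-2 block,
 *	page[2..3] as an order-1 block,
 *	page[1]    as an order-0 block,
 *
 * keeping page[0] for the caller: the upper half goes back on the free
 * list at each step, largest remainder first.
 */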
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);