page_alloc.c 183 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
22
#include <linux/jiffies.h>
Linus Torvalds's avatar
Linus Torvalds committed
23
#include <linux/bootmem.h>
24
#include <linux/memblock.h>
Linus Torvalds's avatar
Linus Torvalds committed
25
#include <linux/compiler.h>
26
#include <linux/kernel.h>
27
#include <linux/kmemcheck.h>
28
#include <linux/kasan.h>
Linus Torvalds's avatar
Linus Torvalds committed
29 30 31 32 33
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
34
#include <linux/ratelimit.h>
35
#include <linux/oom.h>
Linus Torvalds's avatar
Linus Torvalds committed
36 37 38 39 40
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
41
#include <linux/memory_hotplug.h>
Linus Torvalds's avatar
Linus Torvalds committed
42 43
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
44
#include <linux/vmstat.h>
45
#include <linux/mempolicy.h>
46
#include <linux/stop_machine.h>
47 48
#include <linux/sort.h>
#include <linux/pfn.h>
49
#include <linux/backing-dev.h>
50
#include <linux/fault-inject.h>
51
#include <linux/page-isolation.h>
52
#include <linux/page_ext.h>
53
#include <linux/debugobjects.h>
54
#include <linux/kmemleak.h>
55
#include <linux/compaction.h>
56
#include <trace/events/kmem.h>
57
#include <linux/prefetch.h>
58
#include <linux/mm_inline.h>
59
#include <linux/migrate.h>
60
#include <linux/page_ext.h>
61
#include <linux/hugetlb.h>
62
#include <linux/sched/rt.h>
63
#include <linux/page_owner.h>
Linus Torvalds's avatar
Linus Torvalds committed
64

65
#include <asm/sections.h>
Linus Torvalds's avatar
Linus Torvalds committed
66
#include <asm/tlbflush.h>
67
#include <asm/div64.h>
Linus Torvalds's avatar
Linus Torvalds committed
68 69
#include "internal.h"

70 71
/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
72
#define MIN_PERCPU_PAGELIST_FRACTION	(8)
73

74 75 76 77 78
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

79 80 81 82 83 84 85 86 87
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
88
int _node_numa_mem_[MAX_NUMNODES];
89 90
#endif

Linus Torvalds's avatar
Linus Torvalds committed
91
/*
 * Array of node states.
 *
 * Without CONFIG_NUMA there is exactly one node (node 0), so the
 * memory/CPU state masks are statically initialized to bit 0 set.
 * With NUMA, those states are maintained at runtime.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

110 111 112
/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

113
unsigned long totalram_pages __read_mostly;
114
unsigned long totalreserve_pages __read_mostly;
115
unsigned long totalcma_pages __read_mostly;
116 117 118 119 120 121 122 123
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory.  This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

124
int percpu_pagelist_fraction;
125
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
Linus Torvalds's avatar
Linus Torvalds committed
126

127 128 129 130 131 132 133 134 135
#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */
136 137 138 139

static gfp_t saved_gfp_mask;

/*
 * Restore gfp_allowed_mask to the value saved by pm_restrict_gfp_mask().
 * A zero saved_gfp_mask means there is nothing to restore (restrict was
 * never called, or restore already ran).  Caller must hold pm_mutex.
 */
void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

148
/*
 * Save the current gfp_allowed_mask and clear the I/O and FS allocation
 * bits so allocations cannot issue I/O while devices are suspended.
 * Caller must hold pm_mutex; nested restriction is a bug (WARN_ON).
 */
void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}
155 156 157 158 159 160 161

/*
 * Return true while storage I/O is suppressed by pm_restrict_gfp_mask(),
 * i.e. while the GFP_IOFS bits are masked out of gfp_allowed_mask.
 */
bool pm_suspended_storage(void)
{
	return (gfp_allowed_mask & GFP_IOFS) != GFP_IOFS;
}
162 163
#endif /* CONFIG_PM_SLEEP */

164 165 166 167
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

168
static void __free_pages_ok(struct page *page, unsigned int order);
169

Linus Torvalds's avatar
Linus Torvalds committed
170 171 172 173 174 175
/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
/* One ratio per lower zone; ZONE_MOVABLE (the last zone) has no entry. */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};
Linus Torvalds's avatar
Linus Torvalds committed
193 194 195

EXPORT_SYMBOL(totalram_pages);

196
/*
 * Human-readable zone names, indexed by zone number; order must match
 * the zone enumeration (DMA, DMA32, Normal, HighMem, Movable) with the
 * same config conditionals.
 */
static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

Linus Torvalds's avatar
Linus Torvalds committed
210
int min_free_kbytes = 1024;
211
int user_min_free_kbytes = -1;
Linus Torvalds's avatar
Linus Torvalds committed
212

213 214
static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
215
static unsigned long __meminitdata dma_reserve;
Linus Torvalds's avatar
Linus Torvalds committed
216

Tejun Heo's avatar
Tejun Heo committed
217 218 219 220 221 222 223 224 225 226 227
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
228

229 230
#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
231
int nr_online_nodes __read_mostly = 1;
232
EXPORT_SYMBOL(nr_node_ids);
233
EXPORT_SYMBOL(nr_online_nodes);
234 235
#endif

236 237
int page_group_by_mobility_disabled __read_mostly;

238
/*
 * Record @migratetype in the pageblock bitmap for the block containing
 * @page.  When grouping by mobility is disabled, all pcp-type blocks are
 * downgraded to MIGRATE_UNMOVABLE so the grouping machinery stays inert.
 */
void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

Nick Piggin's avatar
Nick Piggin committed
248
#ifdef CONFIG_DEBUG_VM
249
/*
 * Return 1 (and log) if @page's pfn lies outside @zone's span.
 * The zone span can change concurrently (memory hotplug), so the read of
 * zone_start_pfn/spanned_pages is retried under the zone span seqlock.
 */
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

/*
 * Return 1 when @page has a valid pfn and actually belongs to @zone,
 * 0 otherwise.
 */
static int page_is_consistent(struct zone *zone, struct page *page)
{
	return pfn_valid_within(page_to_pfn(page)) &&
	       page_zone(page) == zone;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 * Returns 1 if the page is outside the zone span or inconsistent with it.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page) ||
	    !page_is_consistent(zone, page))
		return 1;

	return 0;
}
Nick Piggin's avatar
Nick Piggin committed
293 294 295 296 297 298 299
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

300 301
/*
 * Report a page in a bad state, rate-limited, then neutralize the page.
 * The static resume/nr_shown/nr_unshown counters implement the burst
 * limiter; they are only updated under zone->lock by callers, so no
 * extra locking appears here.  The page is left mostly intact for
 * debugging, but its _mapcount is reset so it no longer looks PageBuddy.
 */
static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page_badflags(page, reason, bad_flags);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
352 353
 * All pages have PG_compound set.  All tail pages have their ->first_page
 * pointing at the head page.
Linus Torvalds's avatar
Linus Torvalds committed
354
 *
355 356 357
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
Linus Torvalds's avatar
Linus Torvalds committed
358
 */
359 360 361

/* Compound-page destructor: free the whole compound at its stored order. */
static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

365
/*
 * Turn a just-allocated block of 2^order pages into a compound page:
 * install the destructor and order on the head, then mark every tail
 * page and point its ->first_page at the head.
 */
void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->first_page = page;
		/* Make sure p->first_page is always valid for PageTail() */
		smp_wmb();
		__SetPageTail(p);
	}
}

383 384
/* Zero every page of a 2^order block on behalf of __GFP_ZERO. */
static inline void prep_zero_page(struct page *page, unsigned int order,
							gfp_t gfp_flags)
{
	struct page *p = page;
	struct page * const end = page + (1 << order);

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	while (p < end)
		clear_highpage(p++);
}

397 398
#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
399
bool _debug_pagealloc_enabled __read_mostly;
400 401
bool _debug_guardpage_enabled __read_mostly;

402 403 404 405 406 407 408 409 410 411 412 413
/* Parse the "debug_pagealloc=on" early boot parameter. */
static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (!strcmp(buf, "on"))
		_debug_pagealloc_enabled = true;

	return 0;
}
early_param("debug_pagealloc", early_debug_pagealloc);

414 415
static bool need_debug_guardpage(void)
{
	/* Guard pages are only needed when debug_pagealloc is in use */
	return debug_pagealloc_enabled();
}

static void init_debug_guardpage(void)
{
425 426 427
	if (!debug_pagealloc_enabled())
		return;

428 429 430 431 432 433 434
	_debug_guardpage_enabled = true;
}

/* page_ext client: reserves the PAGE_EXT_DEBUG_GUARD bit when needed. */
struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};
435 436 437 438 439 440 441 442 443 444 445 446 447 448 449

/*
 * Parse "debug_guardpage_minorder=N".  Values above MAX_ORDER/2 are
 * rejected; on bad input we still return 0 so boot continues.
 */
static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

450 451
/*
 * Mark a 2^order block starting at @page as a guard area: set the
 * PAGE_EXT_DEBUG_GUARD bit, record the order in page_private, and
 * subtract the pages from the zone's free counters since guard pages
 * must never be handed out.
 */
static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
}

467 468
/*
 * Undo set_page_guard(): clear the guard bit and give the pages back to
 * the zone's free counters (isolated blocks are not counted as free).
 */
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
/* !CONFIG_DEBUG_PAGEALLOC: guard pages compile away to nothing. */
struct page_ext_operations debug_guardpage_ops = { NULL, };
static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

490
/* Record @order in page_private and mark the page as a free buddy. */
static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

/* Remove a page from the buddy system: clear the flag and stored order. */
static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy
 * we can do coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	/* A guard page of the right order also counts as a mergeable buddy */
	if (page_is_guard(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
 * field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

/* Caller must hold zone->lock and have IRQs disabled. */
static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;
	int max_order = MAX_ORDER;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (is_migrate_isolate(migratetype)) {
		/*
		 * We restrict max order of merging to prevent merge
		 * between freepages on isolate pageblock and normal
		 * pageblock. Without this, pageblock isolation
		 * could cause incorrect freepage accounting.
		 */
		max_order = min(MAX_ORDER, pageblock_order + 1);
	} else {
		__mod_zone_freepage_state(zone, 1 << order, migratetype);
	}

	page_idx = pfn & ((1 << max_order) - 1);

	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	/* Merge with free buddies upward as far as possible */
	while (order < max_order - 1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case,
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page
	 */
	/*
	 * NOTE(review): 'buddy' (and buddy_idx) carry values from the last
	 * merge-loop iteration; if the loop never ran (order >= max_order-1
	 * at entry, possible for isolated pageblocks with a small
	 * pageblock_order) they are stale/uninitialized here — verify the
	 * order < MAX_ORDER-2 guard always implies at least one iteration.
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

654
/*
 * Sanity-check a page about to be freed.  Returns 1 (after reporting via
 * bad_page()) if the page is in a state that makes it unsafe to free;
 * otherwise clears the prep-time flags and returns 0.
 */
static inline int free_pages_check(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	page_cpupid_reset_last(page);
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;	/* pcp list currently being drained */
	int batch_free = 0;	/* pages to take from the current list */
	int to_free = count;
	unsigned long nr_scanned;

	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			mt = get_freepage_migratetype(page);
			if (unlikely(has_isolate_pageblock(zone)))
				mt = get_pageblock_migratetype(page);

			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
		} while (--to_free && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}

747 748
/*
 * Free a single 2^order block directly to the buddy allocator, taking
 * zone->lock.  If the page (or any block in the zone) may be isolated,
 * re-read the migratetype from the pageblock bitmap so accounting in
 * __free_one_page() is correct.
 */
static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype)
{
	unsigned long nr_scanned;
	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

766 767 768 769 770 771 772 773 774 775 776 777 778 779 780
/*
 * CONFIG_DEBUG_VM-only check that a compound tail page is well-formed
 * before free: it must be PageTail and point back at @head_page.
 * Returns 1 (after bad_page()) on inconsistency, 0 otherwise.
 */
static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	if (!IS_ENABLED(CONFIG_DEBUG_VM))
		return 0;
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set", 0);
		return 1;
	}
	if (unlikely(page->first_page != head_page)) {
		bad_page(page, "first_page not consistent", 0);
		return 1;
	}
	return 0;
}

781
/*
 * Common checks and bookkeeping before a 2^order block is returned to the
 * buddy allocator.  Returns false (block must NOT be freed) if any page
 * of the block fails the free-time sanity checks.
 */
static bool free_pages_prepare(struct page *page, unsigned int order)
{
	bool compound = PageCompound(page);
	int i, bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);
	kasan_free_pages(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	/* check the head page, then every tail/secondary page */
	bad += free_pages_check(page);
	for (i = 1; i < (1 << order); i++) {
		if (compound)
			bad += free_tail_pages_check(page, page + i);
		bad += free_pages_check(page + i);
	}
	if (bad)
		return false;

	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

/*
 * Free a 2^order block straight to the buddy allocator (bypassing the
 * per-cpu lists), with IRQs disabled around the zone accounting.
 */
static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	set_freepage_migratetype(page, migratetype);
	free_one_page(page_zone(page), page, pfn, order, migratetype);
	local_irq_restore(flags);
}

835
/*
 * Release a 2^order block of boot-time reserved pages to the buddy
 * allocator during init: clear PG_reserved and the refcount on every
 * page (prefetching the next one), account them as managed, then free
 * the block through the normal path.
 */
void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	/* last page: no further prefetch target */
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}

855
#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned long i;
	struct page *p;

	/* Un-reserve every page in the block and zero its refcount. */
	for (i = 0, p = page; i < pageblock_nr_pages; i++, p++) {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		/*
		 * The block is larger than the biggest buddy order, so it
		 * must be handed back in MAX_ORDER - 1 sized pieces.
		 */
		for (i = 0; i < pageblock_nr_pages; i += MAX_ORDER_NR_PAGES) {
			p = page + i;
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
		}
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

	adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif
Linus Torvalds's avatar
Linus Torvalds committed
885 886 887 888 889 890 891 892 893 894 895 896 897

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
898
 * -- nyc
Linus Torvalds's avatar
Linus Torvalds committed
899
 */
900
static inline void expand(struct zone *zone, struct page *page,
901 902
	int low, int high, struct free_area *area,
	int migratetype)
Linus Torvalds's avatar
Linus Torvalds committed
903 904 905 906 907 908 909
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
910
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
911

912
		if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
913
			debug_guardpage_enabled() &&
914
			high < debug_guardpage_minorder()) {
915 916 917 918 919 920
			/*
			 * Mark as guard pages (or page), that will allow to
			 * merge back to allocator when buddy will be freed.
			 * Corresponding page table entries will not be touched,
			 * pages will stay not present in virtual address space
			 */
921
			set_page_guard(zone, &page[size], high, migratetype);
922 923
			continue;
		}
924
		list_add(&page[size].lru, &area->free_list[migratetype]);
Linus Torvalds's avatar
Linus Torvalds committed
925 926 927 928 929 930 931 932
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

/*
 * This page is about to be returned from the page allocator
 */
933
static inline int check_new_page(struct page *page)
Linus Torvalds's avatar
Linus Torvalds committed
934
{
935
	const char *bad_reason = NULL;
936 937 938 939 940 941 942 943 944 945 946 947
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
	}
948 949 950 951
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
952 953
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
954
		return 1;
955
	}
956 957 958
	return 0;
}

959 960
/*
 * Prepare a freshly allocated (possibly compound) page for handing to the
 * caller.  Returns 1 if any constituent page fails validation, 0 on success.
 */
static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
								int alloc_flags)
{
	unsigned int nr = 1 << order;
	unsigned int i;

	/* Validate every page of the block before committing to it. */
	for (i = 0; i < nr; i++) {
		if (unlikely(check_new_page(page + i)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, nr, 1);
	kasan_alloc_pages(page, order);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	set_page_owner(page, order, gfp_flags);

	/*
	 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);

	return 0;
}

996 997 998 999
/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
1000 1001
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
1002 1003 1004
						int migratetype)
{
	unsigned int current_order;
1005
	struct free_area *area;
1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
1020
		set_freepage_migratetype(page, migratetype);
1021 1022 1023 1024 1025 1026 1027
		return page;
	}

	return NULL;
}


1028 1029 1030 1031
/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
1032 1033 1034
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
1035
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
1036 1037 1038
#ifdef CONFIG_CMA
	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
#endif
1039
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
1040
#ifdef CONFIG_MEMORY_ISOLATION
1041
	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
1042
#endif
1043 1044
};

1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055
#ifdef CONFIG_CMA
/* Try to grab the smallest suitable page from the CMA free lists. */
static struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
/* No CMA configured: there is never anything to fall back to. */
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif

1056 1057
/*
 * Move the free pages in a range to the free lists of the requested type.
1058
 * Note that start_page and end_pages are not aligned on a pageblock
1059 1060
 * boundary. If alignment is required, use move_freepages_block()
 */
1061
int move_freepages(struct zone *zone,
Adrian Bunk's avatar
Adrian Bunk committed
1062 1063
			  struct page *start_page, struct page *end_page,
			  int migratetype)
1064 1065 1066
{
	struct page *page;
	unsigned long order;
1067
	int pages_moved = 0;
1068 1069 1070 1071 1072 1073 1074

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
Mel Gorman's avatar
Mel Gorman committed
1075
	 * grouping pages by mobility
1076
	 */
1077
	VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
1078 1079 1080
#endif

	for (page = start_page; page <= end_page;) {
1081
		/* Make sure we are not inadvertently changing nodes */
1082
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
1083

1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094
		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
1095 1096
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
1097
		set_freepage_migratetype(page, migratetype);
1098
		page += 1 << order;
1099
		pages_moved += 1 << order;
1100 1101
	}

1102
	return pages_moved;
1103 1104
}

1105
int move_freepages_block(struct zone *zone, struct page *page,