/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory.  This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page_badflags(page, reason, bad_flags);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All tail pages have their ->first_page
 * pointing at the head page.
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
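/*
 * Illustrative sketch (not part of the original file): after
 * prep_compound_page(page, 2) below, the four pages look like
 *
 *   page[0]: head page, PG_head set, compound_order() == 2
 *   page[1]: tail page, ->first_page == page[0]; its ->lru.next/->lru.prev
 *            carry the put_page() destructor and the order, as noted above
 *   page[2], page[3]: tail pages, ->first_page == page[0]
 *
 * so any tail page can reach its head page in O(1) via ->first_page.
 */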

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->first_page = page;
		/* Make sure p->first_page is always valid for PageTail() */
		smp_wmb();
		__SetPageTail(p);
	}
}

/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order)) {
		bad_page(page, "wrong compound order", 0);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p))) {
			bad_page(page, "PageTail not set", 0);
			bad++;
		} else if (unlikely(p->first_page != page)) {
			bad_page(page, "first_page not consistent", 0);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, unsigned int order,
							gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard_flag(struct page *page)
{
	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}

static inline void clear_page_guard_flag(struct page *page)
{
	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline void set_page_guard_flag(struct page *page) { }
static inline void clear_page_guard_flag(struct page *page) { }
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
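/*
 * Worked example for the parent formula above (illustrative, not part of
 * the original file): order-1 buddies #8 and #10 share the order-2 parent
 * P = 8 & ~(1 << 1) = 8, and likewise 10 & ~(1 << 1) = 8.
 */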
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1 << order);
}

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce with. A page and its buddy can be coalesced if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * to PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. The page's order is recorded in the
 * page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */
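/*
 * Worked example of the coalescing loop below (illustrative, not part of
 * the original file): freeing an order-0 page at page_idx 8 while its
 * buddy at 8 ^ 1 = 9 is already free merges both into an order-1 block at
 * combined_idx = 9 & 8 = 8; the loop then retries at order 1 against the
 * buddy at 8 ^ 2 = 10, and so on up to MAX_ORDER-1.
 */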

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;

	VM_BUG_ON(!zone_is_initialized(zone));

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);

	page_idx = pfn & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < MAX_ORDER-1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard_flag(buddy);
			set_page_private(page, 0);
			__mod_zone_freepage_state(zone, 1 << order,
						  migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. If that is
	 * happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher-order page.
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
	if (unlikely(mem_cgroup_bad_page_check(page)))
		bad_reason = "cgroup check failed";
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	page_cpupid_reset_last(page);
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;
	unsigned long nr_scanned;

	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			mt = get_freepage_migratetype(page);
			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
			if (likely(!is_migrate_isolate_page(page))) {
				__mod_zone_page_state(zone, NR_FREE_PAGES, 1);
				if (is_migrate_cma(mt))
					__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
			}
		} while (--to_free && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype)
{
	unsigned long nr_scanned;
	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	__free_one_page(page, pfn, zone, order, migratetype);
	if (unlikely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);
	spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	for (i = 0; i < (1 << order); i++)
		bad += free_pages_check(page + i);
	if (bad)
		return false;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	set_freepage_migratetype(page, migratetype);
	free_one_page(page_zone(page), page, pfn, order, migratetype);
	local_irq_restore(flags);
}

void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		i = pageblock_nr_pages;
		p = page;
		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

	adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

#ifdef CONFIG_DEBUG_PAGEALLOC
		if (high < debug_guardpage_minorder()) {
			/*
			 * Mark as guard pages (or a single page) so they can be
			 * merged back into the allocator when the buddy is freed.
			 * The corresponding page table entries are not touched;
			 * the pages stay not-present in the virtual address space.
			 */
			INIT_LIST_HEAD(&page[size].lru);
			set_page_guard_flag(&page[size]);
			set_page_private(&page[size], high);
			/* Guard pages are not available for any usage */
			__mod_zone_freepage_state(zone, -(1 << high),
						  migratetype);
			continue;
		}
#endif
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
	}
	if (unlikely(mem_cgroup_bad_page_check(page)))
		bad_reason = "cgroup check failed";
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;