/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
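	/*
	 * GFP_IOFS is __GFP_IO | __GFP_FS: masking it out keeps allocations
	 * from starting I/O while devices are suspended.
	 */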
	gfp_allowed_mask &= ~GFP_IOFS;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{

	if (unlikely(page_group_by_mobility_disabled))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

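	/*
	 * Retry if the zone's span changed under us (e.g. during memory
	 * hotplug), so the check never sees a torn start_pfn/spanned_pages
	 * pair.
	 */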
	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		reset_page_mapcount(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page);

	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	reset_page_mapcount(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);
		p->first_page = page;
	}
}

/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order) ||
	    unlikely(!PageHead(page))) {
		bad_page(page);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) || (p->first_page != page))) {
			bad_page(page);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

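/*
 * Record a free block's order in page_private() and mark the page as being
 * in the buddy allocator (PageBuddy); rmv_page_order() undoes both.
 */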
static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1 << order);
}

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount to -2.
 * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with _mapcount -2. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were   
 * free, the remainder of the region must be split into blocks.   
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.            
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);

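	/*
	 * page_idx is the pfn offset within a MAX_ORDER-aligned block, so the
	 * buddy XOR arithmetic below never strays outside that block.
	 */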
	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & ((1 << order) - 1));
	VM_BUG_ON(bad_range(zone, page));

	while (order < MAX_ORDER-1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;

		/* Our buddy is free, merge with it and move up one order. */
		list_del(&buddy->lru);
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

/*
 * free_page_mlock() -- clean up attempts to free an mlocked() page.
 * Page should not be on lru, so no need to fix that up.
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
	__dec_zone_page_state(page, NR_MLOCK);
	__count_vm_event(UNEVICTABLE_MLOCKFREED);
}

static inline int free_pages_check(struct page *page)
{
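	/*
	 * The individual checks are OR-ed into one test so the common
	 * "page is fine" case costs a single branch.
	 */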
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(atomic_read(&page->_count) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
		(mem_cgroup_bad_page_check(page)))) {
		bad_page(page);
		return 1;
	}
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;

	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, zone, 0, page_private(page));
			trace_mm_page_pcpu_drain(page, 0, page_private(page));
		} while (--to_free && --batch_free && !list_empty(list));
	}
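	/* All freed pages are accounted back to NR_FREE_PAGES in one update. */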
	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;

	__free_one_page(page, zone, order, migratetype);
	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
	spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	trace_mm_page_free_direct(page, order);
	kmemcheck_free_shadow(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	for (i = 0; i < (1 << order); i++)
		bad += free_pages_check(page + i);
	if (bad)
		return false;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int wasMlocked = __TestClearPageMlocked(page);

	if (!free_pages_prepare(page, order))
		return;

	local_irq_save(flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order,
					get_pageblock_migratetype(page));
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

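		/*
		 * Higher-order frees arrive from the bootmem allocator in
		 * BITS_PER_LONG-page blocks: un-reserve each page, then hand
		 * the whole block to the buddy allocator at once.
		 */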
		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(atomic_read(&page->_count) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
		(mem_cgroup_bad_page_check(page)))) {
		bad_page(page);
		return 1;
	}
	return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (unlikely(check_new_page(p)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area * area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

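/*
 * Round the requested page down to its pageblock and move every free page in
 * that block, giving up if the block would run past the end of the zone.
 */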
static int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

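/*
 * Set the migratetype on every pageblock spanned by a page whose order is at
 * least pageblock_order.
 */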
static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
	struct free_area * area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 */
			if (unlikely(current_order >= (pageblock_order >> 1)) ||
					start_migratetype == MIGRATE_RECLAIMABLE ||
					page_group_by_mobility_disabled) {