/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long highest_memmap_pfn __read_mostly;
int percpu_pagelist_fraction;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};
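
/*
 * Illustrative arithmetic (not kernel code): with the 1G split quoted
 * above (16M DMA, 784M Normal, 224M HighMem) and the default ratios,
 * the protections work out roughly as:
 *
 *	ZONE_NORMAL against HIGHMEM allocations:  224M / 32          ~= 7M
 *	ZONE_DMA    against HIGHMEM allocations: (224M + 784M) / 256 ~= 4M
 *	ZONE_DMA    against NORMAL  allocations:  784M / 256         ~= 3M
 *
 * The per-zone values themselves are derived from these ratios in
 * setup_per_zone_lowmem_reserve().
 */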

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
  static unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
EXPORT_SYMBOL(nr_node_ids);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{
	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	printk(KERN_EMERG
		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
		page, (void *)page->flags, page_count(page),
		page_mapcount(page), page->mapping, page->index);
	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");

	dump_stack();

	/* Leave bad fields for debug, except PageBuddy could make trouble */
	__ClearPageBuddy(page);
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
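
/*
 * Illustrative sketch (not kernel code): after prep_compound_page(h, 2)
 * below, the four pages of a head page "h" look like:
 *
 *	h + 0:		PG_head set, compound_order(h) == 2
 *	h + 1:		PG_tail set, ->first_page == h; its ->lru.next holds
 *			free_compound_page() and ->lru.prev holds the order 2
 *	h + 2, h + 3:	PG_tail set, ->first_page == h
 */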

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);
		p->first_page = page;
	}
}

#ifdef CONFIG_HUGETLBFS
void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		p->first_page = page;
	}
}
#endif

static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order) ||
	    unlikely(!PageHead(page))) {
		bad_page(page);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) | (p->first_page != page))) {
			bad_page(page);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order-(O+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
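
/*
 * Worked example for the two helpers above (illustrative only): with
 * page_idx = 8 and order = 1,
 *
 *	buddy_idx    = 8 ^ (1 << 1)  = 10	(__page_find_buddy)
 *	combined_idx = 8 & ~(1 << 1) = 8	(__find_combined_index)
 *
 * i.e. pages 8-9 and 10-11 are order-1 buddies that combine into the
 * order-2 block starting at index 8.
 */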

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were   
 * free, the remainder of the region must be split into blocks.   
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.            
 *
 * -- wli
 */
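
/*
 * Illustrative trace (not kernel code) of the merge loop in
 * __free_one_page() below: freeing an order-0 page at index 6 while
 * page 7 and the order-1 block at 4 are already free gives
 *
 *	order 0: buddy = 6 ^ 1 = 7, free -> merge, page_idx = 6 & ~1 = 6
 *	order 1: buddy = 6 ^ 2 = 4, free -> merge, page_idx = 6 & ~2 = 4
 *	order 2: buddy = 4 ^ 4 = 0, not free -> stop
 *
 * and the resulting order-2 block at index 4 is placed on
 * zone->free_area[2].free_list[migratetype].
 */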

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;
	int migratetype = get_pageblock_migratetype(page);

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;

		/* Our buddy is free, merge with it and move up one order. */
		list_del(&buddy->lru);
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru,
		&zone->free_area[order].free_list[migratetype]);
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	free_page_mlock(page);
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
		bad_page(page);
		return 1;
	}
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a list of pages. 
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	spin_lock(&zone->lock);
	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int bad = 0;

	for (i = 0 ; i < (1 << order) ; ++i)
		bad += free_pages_check(page + i);
	if (bad)
		return;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
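
/*
 * Illustrative trace (not kernel code): when expand() above serves an
 * order-0 request from an order-3 block starting at page p,
 *
 *	p + 4 .. p + 7 go back on the order-2 free list,
 *	p + 2 .. p + 3 go back on the order-1 free list,
 *	p + 1          goes back on the order-0 free list,
 *
 * leaving p itself as the allocated order-0 page.
 */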

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
		bad_page(page);
		return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area * area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_del(&page->lru);
		list_add(&page->lru,
			&zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

static int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

/* Remove an element from the buddy allocator from the fallback list */
static struct page *__rmqueue_fallback(struct zone *zone, int order,
						int start_migratetype)
{
	struct free_area * area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 */
			if (unlikely(current_order >= (pageblock_order >> 1)) ||
					start_migratetype == MIGRATE_RECLAIMABLE) {
				unsigned long pages;
				pages = move_freepages_block(zone, page,
								start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (pageblock_order-1)))
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);
			__mod_zone_page_state(zone, NR_FREE_PAGES,
							-(1UL << order));

			if (current_order == pageblock_order)
				set_pageblock_migratetype(page,
							start_migratetype);

			expand(zone, page, order, current_order, area, migratetype);
			return page;
		}
	}

	/* Use MIGRATE_RESERVE rather than fail an allocation */
	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page))
		page = __rmqueue_fallback(zone, order, migratetype);

	return page;
}

/* 
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order, 
			unsigned long count, struct list_head *list,
			int migratetype)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the caller's
		 * perspective, the linked list is ordered by page number in
		 * some conditions. This is useful for IO devices that can
		 * merge IO requests if the physical pages are ordered
		 * properly.
		 */
		list_add(&page->lru, list);
		set_page_private(page, migratetype);
		list = &page->lru;
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	free_pages_bulk(zone, to_drain, &pcp->list, 0);
	pcp->count -= to_drain;
	local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		if (!populated_zone(zone))
			continue;

		pset = zone_pcp(zone, cpu);

		pcp = &pset->pcp;
		local_irq_save(flags);
		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
		pcp->count = 0;
		local_irq_restore(flags);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_pages(void)
{
	on_each_cpu(drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_PM */

/*
 * Free a 0-order page
 */
static void free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))