/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};
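
/*
 * Worked example of the ratios above (illustrative, using the 1G
 * machine from the comment): a NORMAL allocation must leave
 * 784M/256 (about 3M) of ZONE_DMA untouched, while a HIGHMEM
 * allocation must leave 224M/32 (7M) of ZONE_NORMAL and
 * (224M+784M)/256 (about 4M) of ZONE_DMA untouched.
 */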

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
  unsigned long __initdata required_kernelcore;
  unsigned long __initdata required_movablecore;
  unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
EXPORT_SYMBOL(nr_node_ids);
#endif

int page_group_by_mobility_disabled __read_mostly;

static inline int get_pageblock_migratetype(struct page *page)
{
	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

static void set_pageblock_migratetype(struct page *page, int migratetype)
{
	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

static inline int allocflags_to_migratetype(gfp_t gfp_flags)
{
	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Cluster based on mobility */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
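
/*
 * Illustrative mapping of the expression above, assuming the usual
 * enum ordering MIGRATE_UNMOVABLE == 0, MIGRATE_RECLAIMABLE == 1 and
 * MIGRATE_MOVABLE == 2:
 *
 *	neither flag set	-> 0 (MIGRATE_UNMOVABLE)
 *	__GFP_RECLAIMABLE	-> 1 (MIGRATE_RECLAIMABLE)
 *	__GFP_MOVABLE		-> 2 (MIGRATE_MOVABLE)
 */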

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);
		p->first_page = page;
	}
}
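
/*
 * Illustrative result: prep_compound_page(page, 2) covers four pages;
 * the first is marked via __SetPageHead() with the order and
 * destructor recorded through set_compound_order() and
 * set_compound_page_dtor(), and the three following tail pages are
 * marked via __SetPageTail() with ->first_page pointing at the head.
 */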

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely(compound_order(page) != order))
		bad_page(page);

	if (unlikely(!PageHead(page)))
		bad_page(page);
	__ClearPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) |
				(p->first_page != page)))
			bad_page(page);
		__ClearPageTail(p);
	}
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * function for dealing with page's order in buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8, its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
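
/*
 * Worked example (illustrative): for page_idx = 10 at order 1,
 * __page_find_buddy() gives buddy_idx = 10 ^ 2 = 8, and
 * __find_combined_index() gives 10 & ~2 = 8, so the merged order-2
 * block starts at index 8.
 */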

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were   
 * free, the remainder of the region must be split into blocks.   
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.            
 *
 * -- wli
 */
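
/*
 * Illustrative walk-through: freeing order-0 page 8 while page 9 is
 * already free merges the pair into an order-1 block at 8; if the
 * order-1 block at 10 is free as well, the two merge again into an
 * order-2 block at 8, and so on up to MAX_ORDER-1.
 */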

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;
	int migratetype = get_pageblock_migratetype(page);
	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));
	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;		/* Move the buddy up one level. */
		list_del(&buddy->lru);
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru,
		&zone->free_area[order].free_list[migratetype]);
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages. 
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
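
/*
 * Illustrative run of expand(): with low == 0 and high == 3, the loop
 * hands back the upper halves as one order-2, one order-1 and one
 * order-0 block, leaving the caller the remaining order-0 page at the
 * start of the original order-3 block.
 */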

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area * area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};
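
/*
 * Reading the table above (illustrative): a MIGRATE_UNMOVABLE request
 * whose own free lists are empty falls back to MIGRATE_RECLAIMABLE,
 * then to MIGRATE_MOVABLE, and only then to the MIGRATE_RESERVE
 * blocks.
 */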

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a MAX_ORDER_NR_PAGES
 * boundary. If alignment is required, use move_freepages_block()
 */
int move_freepages(struct zone *zone,
			struct page *start_page, struct page *end_page,
			int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_del(&page->lru);
		list_add(&page->lru,
			&zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(MAX_ORDER_NR_PAGES-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + MAX_ORDER_NR_PAGES - 1;
	end_pfn = start_pfn + MAX_ORDER_NR_PAGES - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}
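
/*
 * Illustrative example: with MAX_ORDER_NR_PAGES == 1024, a page at
 * pfn 5000 makes move_freepages_block() operate on the aligned block
 * spanning pfns 4096-5119.
 */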

/* Return the page with the lowest PFN in the list */
static struct page *min_page(struct list_head *list)
{
	unsigned long min_pfn = -1UL;
	struct page *min_page = NULL, *page;

	list_for_each_entry(page, list, lru) {
		unsigned long pfn = page_to_pfn(page);
		if (pfn < min_pfn) {
			min_pfn = pfn;
			min_page = page;
		}
	}

	return min_page;
}

/* Remove an element from the buddy allocator from the fallback list */
static struct page *__rmqueue_fallback(struct zone *zone, int order,
						int start_migratetype)
{
	struct free_area * area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;
			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			/* Bias kernel allocations towards low pfns */
			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			if (unlikely(start_migratetype != MIGRATE_MOVABLE))
				page = min_page(&area->free_list[migratetype]);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 */
			if (unlikely(current_order >= MAX_ORDER / 2) ||
					start_migratetype == MIGRATE_RECLAIMABLE) {
				unsigned long pages;
				pages = move_freepages_block(zone, page,
								start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (MAX_ORDER-2)))
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);
			__mod_zone_page_state(zone, NR_FREE_PAGES,
							-(1UL << order));

			if (current_order == MAX_ORDER - 1)
				set_pageblock_migratetype(page,
							start_migratetype);

			expand(zone, page, order, current_order, area, migratetype);
			return page;
		}
	}

	/* Use MIGRATE_RESERVE rather than fail an allocation */
	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page))
		page = __rmqueue_fallback(zone, order, migratetype);

	return page;
}

/* 
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order, 
			unsigned long count, struct list_head *list,
			int migratetype)
{
	int i;
	
	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;
		list_add(&page->lru, list);
		set_page_private(page, migratetype);
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	free_pages_bulk(zone, to_drain, &pcp->list, 0);
	pcp->count -= to_drain;
	local_irq_restore(flags);
}
#endif

static void __drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		if (!populated_zone(zone))
			continue;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			local_irq_save(flags);
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
			local_irq_restore(flags);
		}
	}
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}
	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;
			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);	
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);	
}

void smp_drain_local_pages(void *arg)
{
	drain_local_pages();
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);

	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
}

/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);