/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};
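
/*
 * Worked example (editor's illustration, not from the original source):
 * with the default ratio of 256 above, a 784MB ZONE_NORMAL makes the
 * allocator keep roughly 784M/256 = ~3MB of ZONE_DMA free for requests
 * that could also have been satisfied from ZONE_NORMAL. On 4K pages:
 *
 *	reserve_pages = (784 << (20 - 12)) / 256;	= 784 pages, ~3MB
 */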

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif
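
  /*
   * Worked example (editor's illustration, not from the original source):
   * with CONFIG_MAX_ACTIVE_REGIONS unset and MAX_NUMNODES = 64, the
   * rules above give
   *
   *	MAX_ACTIVE_REGIONS = 64 * 50 = 3200
   *
   * slots in early_node_map[] below.
   */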

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
  unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
EXPORT_SYMBOL(nr_node_ids);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{
	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
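
/*
 * Illustrative sketch (editor's addition, based on the layout described
 * above): for the head page of an order-2 compound allocation,
 *
 *	compound_order(page) returns 2 (stored at prep time), so the
 *	compound spans 1 << 2 = 4 contiguous pages, page[0]..page[3],
 *	and each tail page's ->first_page points back at page[0].
 */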

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);
		p->first_page = page;
	}
}

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely(compound_order(page) != order))
		bad_page(page);

	if (unlikely(!PageHead(page)))
		bad_page(page);
	__ClearPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) |
				(p->first_page != page)))
			bad_page(page);
		__ClearPageTail(p);
	}
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
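
/*
 * Worked example (editor's illustration, not from the original source):
 * for page_idx 8 at order 1, the helpers above give
 *
 *	buddy_idx    = 8 ^ (1 << 1) = 10
 *	combined_idx = 8 & ~(1 << 1) = 8
 *
 * so pages 8-9 and 10-11 merge into the order-2 block starting at 8.
 */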

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were   
 * free, the remainder of the region must be split into blocks.   
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.            
 *
 * -- wli
 */
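
/*
 * Worked example (editor's illustration, not from the original source):
 * freeing order-0 page 5 while page 4 is already free coalesces upward
 * until a buddy test fails:
 *
 *	order 0: buddy of 5 is 5 ^ 1 = 4 (free) -> merge into block at 4
 *	order 1: buddy of 4 is 4 ^ 2 = 6 (busy) -> stop
 *
 * leaving one order-1 block at page 4 on free_area[1].
 */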

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;
	int migratetype = get_pageblock_migratetype(page);

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru,
		&zone->free_area[order].free_list[migratetype]);
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages. 
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
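
/*
 * Worked example (editor's illustration, not from the original source):
 * expand(zone, page, 0, 3, area, migratetype) carves an order-3 block
 * (8 pages) down to the order-0 page the caller keeps, returning the
 * unused halves to the free lists:
 *
 *	pages 4-7 -> free_list[2]	pages 2-3 -> free_list[1]
 *	page  1   -> free_list[0]	page  0   -> kept by the caller
 */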

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area * area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};
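
/*
 * Example (editor's illustration, not from the original source): when a
 * MIGRATE_UNMOVABLE request finds its own free lists empty,
 * __rmqueue_fallback() below walks fallbacks[MIGRATE_UNMOVABLE][] in
 * order, i.e. it raids MIGRATE_RECLAIMABLE first, then MIGRATE_MOVABLE;
 * MIGRATE_RESERVE is skipped during the walk and used only as the final
 * resort via __rmqueue_smallest(zone, order, MIGRATE_RESERVE).
 */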

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
int move_freepages(struct zone *zone,
			struct page *start_page, struct page *end_page,
			int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_del(&page->lru);
		list_add(&page->lru,
			&zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}
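
/*
 * Worked example (editor's illustration, assuming pageblock_nr_pages is
 * 1024): for a page at pfn 5000, move_freepages_block() above rounds to
 *
 *	start_pfn = 5000 & ~(1024 - 1) = 4096
 *	end_pfn   = 4096 + 1024 - 1   = 5119
 *
 * and moves every free block within pfns 4096-5119 to the new migratetype.
 */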

/* Return the page with the lowest PFN in the list */
static struct page *min_page(struct list_head *list)
{
	unsigned long min_pfn = -1UL;
	struct page *min_page = NULL, *page;

	list_for_each_entry(page, list, lru) {
		unsigned long pfn = page_to_pfn(page);
		if (pfn < min_pfn) {
			min_pfn = pfn;
			min_page = page;
		}
	}

	return min_page;
}

768 769 770 771 772 773 774 775 776 777 778 779 780 781 782
/* Remove an element from the buddy allocator from the fallback list */
static struct page *__rmqueue_fallback(struct zone *zone, int order,
						int start_migratetype)
{
	struct free_area * area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			/* Bias kernel allocations towards low pfns */
			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			if (unlikely(start_migratetype != MIGRATE_MOVABLE))
				page = min_page(&area->free_list[migratetype]);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 */
			if (unlikely(current_order >= (pageblock_order >> 1)) ||
					start_migratetype == MIGRATE_RECLAIMABLE) {
				unsigned long pages;
				pages = move_freepages_block(zone, page,
								start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (pageblock_order-1)))
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);
			__mod_zone_page_state(zone, NR_FREE_PAGES,
							-(1UL << order));

			if (current_order == pageblock_order)
				set_pageblock_migratetype(page,
							start_migratetype);

			expand(zone, page, order, current_order, area, migratetype);
			return page;
		}
	}

	/* Use MIGRATE_RESERVE rather than fail an allocation */
	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page))
		page = __rmqueue_fallback(zone, order, migratetype);

	return page;
}

/* 
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order, 
			unsigned long count, struct list_head *list,
			int migratetype)
{
	int i;
	
	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;
		list_add(&page->lru, list);
		set_page_private(page, migratetype);
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	free_pages_bulk(zone, to_drain, &pcp->list, 0);
	pcp->count -= to_drain;
	local_irq_restore(flags);
}
#endif

static void __drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		if (!populated_zone(zone))
			continue;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			local_irq_save(flags);
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
			local_irq_restore(flags);
		}
	}
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);	
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);	
}

void smp_drain_local_pages(void *arg)
{
	drain_local_pages();
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 */
void drain_all_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);

	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
}

/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
	arch_free_page(page, 0);
	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	__count_vm_event(PGFREE);
	list_add(&page->lru, &pcp->list);
	set_page_private(page, get_pageblock_migratetype(page));