/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);
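
/*
 * Editor's note: these masks back the for_each_node_state() iterators,
 * e.g. for_each_node_state(nid, N_ONLINE) walks the currently online
 * nodes.  On !NUMA builds node 0 is statically marked as having normal
 * memory and cpus.
 */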

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	    = (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
  unsigned long __initdata required_kernelcore;
  unsigned long __initdata required_movablecore;
  unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
EXPORT_SYMBOL(nr_node_ids);
#endif

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
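
/*
 * Illustration (editor's sketch, not kernel code): an order-2 compound
 * allocation covers four struct pages; page[0] is the head, the rest
 * are tails pointing back at it:
 *
 *	struct page *head = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);
 *	// PageHead(head); PageTail(head + 1) .. PageTail(head + 3)
 *	// (head + 1)->first_page == head; order stored via the head page
 */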

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);
		p->first_page = page;
	}
}

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely(compound_order(page) != order))
		bad_page(page);

	if (unlikely(!PageHead(page)))
		bad_page(page);
	__ClearPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) |
				(p->first_page != page)))
			bad_page(page);
		__ClearPageTail(p);
	}
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * function for dealing with page's order in buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
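
/*
 * Worked example (editor's note): freeing page_idx 8 at order 1, the
 * buddy is 8 ^ (1 << 1) = 10 and the merged order-2 block starts at
 * 8 & ~(1 << 1) = 8.  Freeing page_idx 10 yields the same combined
 * index: the merged block always starts at the lower buddy.
 */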

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce with.  A page and its buddy can be coalesced if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were   
 * free, the remainder of the region must be split into blocks.   
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.            
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	__mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}
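
/*
 * Coalescing trace (editor's sketch): freeing the order-0 page at
 * page_idx 12 while 13 is free merges them into an order-1 block at 12;
 * if 14-15 already sit on the order-1 free list, a second pass merges
 * everything into an order-2 block at 12, which is linked into
 * zone->free_area[2].free_list with its order kept in page_private().
 */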

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages. 
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
 	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
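
/*
 * Worked example (editor's note): carving an order-0 page out of an
 * order-3 block of eight pages, expand() returns page[4..7] to the
 * order-2 free list, page[2..3] to order 1 and page[1] to order 0,
 * leaving page[0] for the caller.
 */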

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
Linus Torvalds's avatar
Linus Torvalds committed
628
	kernel_map_pages(page, 1 << order, 1);
Nick Piggin's avatar
Nick Piggin committed
629 630 631 632 633 634 635

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/* 
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area * area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		__mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
		expand(zone, page, order, current_order, area);
		return page;
	}

	return NULL;
}

/* 
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order, 
			unsigned long count, struct list_head *list)
{
	int i;
	
	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order);
		if (unlikely(page == NULL))
			break;
		list_add_tail(&page->lru, list);
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;

	local_irq_save(flags);
	if (pcp->count >= pcp->batch)
		to_drain = pcp->batch;
	else
		to_drain = pcp->count;
	free_pages_bulk(zone, to_drain, &pcp->list, 0);
	pcp->count -= to_drain;
	local_irq_restore(flags);
}
#endif

static void __drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		if (!populated_zone(zone))
			continue;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			local_irq_save(flags);
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
			local_irq_restore(flags);
		}
	}
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}

	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);	
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);	
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
	arch_free_page(page, 0);
	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	__count_vm_event(PGFREE);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}
	
void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}
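
/*
 * Editor's note: each cpu keeps two pcp lists per zone - pcp[0] for
 * cache-hot pages and pcp[1] for cache-cold ones.  Pages whose contents
 * the CPU never touched (e.g. abandoned read-ahead) are freed cold, and
 * buffered_rmqueue() serves __GFP_COLD allocations from the cold list.
 */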

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}

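/*
 * Usage sketch (editor's note, hypothetical caller):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	split_page(page, 2);
 *	// page + 0 .. page + 3 now each carry their own reference and
 *	// must be released individually with __free_page()
 */
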
/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu  = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp[cold];
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count = rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
			if (unlikely(!pcp->count))
				goto failed;
		}
		page = list_entry(pcp->list.next, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(zonelist, zone);
	local_irq_restore(flags);
	put_cpu();

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
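
/*
 * Editor's note: the allocator or-s these into alloc_flags; a
 * GFP_ATOMIC caller, for instance, ends up with ALLOC_WMARK_MIN |
 * ALLOC_HIGH | ALLOC_HARDER, letting it dip further below the zone
 * watermarks than a plain GFP_KERNEL allocation.
 */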
#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct fail_page_alloc_attr {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

	struct dentry *ignore_gfp_highmem_file;
	struct dentry *ignore_gfp_wait_file;
	struct dentry *min_order_file;

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return 0;
	if (gfp_mask & __GFP_NOFAIL)
		return 0;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return 0;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return 0;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;
	int err;

	err = init_fault_attr_dentries(&fail_page_alloc.attr,
				       "fail_page_alloc");
	if (err)
		return err;
	dir = fail_page_alloc.attr.dentries.dir;

	fail_page_alloc.ignore_gfp_wait_file =
		debugfs_create_bool("ignore-gfp-wait", mode, dir,
				      &fail_page_alloc.ignore_gfp_wait);

	fail_page_alloc.ignore_gfp_highmem_file =
		debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				      &fail_page_alloc.ignore_gfp_highmem);
	fail_page_alloc.min_order_file =
		debugfs_create_u32("min-order", mode, dir,
				   &fail_page_alloc.min_order);

	if (!fail_page_alloc.ignore_gfp_wait_file ||
            !fail_page_alloc.ignore_gfp_highmem_file ||
            !fail_page_alloc.min_order_file) {
		err = -ENOMEM;
		debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
		debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
		debugfs_remove(fail_page_alloc.min_order_file);
		cleanup_fault_attr_dentries(&fail_page_alloc.attr);
	}

	return err;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return 0;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
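
/*
 * Worked example (editor's note, made-up numbers): for mark = 128,
 * order = 2 and ALLOC_HIGH, min becomes 64.  Ignoring the
 * "- (1 << order) + 1" adjustment above, the zone passes only if free
 * pages exceed 64 + lowmem_reserve[classzone_idx] overall, if pages in
 * blocks of order >= 1 exceed 32, and if pages in blocks of
 * order >= 2 exceed 16.
 */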
