/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
	 256,
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32
#endif
};

EXPORT_SYMBOL(totalram_pages);

/*
 * Used by page_zone() to look up the address of the struct zone whose
 * id is encoded in the upper bits of page->flags
 */
struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);

static char *zone_names[MAX_NR_ZONES] = {
	 "DMA",
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem"
#endif
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;
Linus Torvalds's avatar
Linus Torvalds committed
105

Nick Piggin's avatar
Nick Piggin committed
106
#ifdef CONFIG_DEBUG_VM
107
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].lru.next = (void *)free_compound_page;	/* set dtor */
	page[1].lru.prev = (void *)order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageCompound(p);
		set_page_private(p, (unsigned long)page);
	}
}

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely((unsigned long)page[1].lru.prev != order))
		bad_page(page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageCompound(p) |
				(page_private(p) != (unsigned long)page)))
			bad_page(page);
		__ClearPageCompound(p);
	}
}
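
/*
 * For example: after prep_compound_page(page, 2), all four pages
 * page[0]..page[3] have PG_compound set and page_private() pointing at
 * page[0], while page[1].lru.next holds free_compound_page (the
 * destructor) and page[1].lru.prev holds the order, 2.
 */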

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * Functions for dealing with a page's order in the buddy system.
 * zone->lock is already held when these are used, so we don't need
 * atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
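
/*
 * Worked example of the arithmetic above: for page_idx 8 at order 1,
 *
 *	buddy_idx    = 8 ^ (1 << 1)  = 10
 *	combined_idx = 8 & ~(1 << 1) = 8
 *
 * so pages #8 and #10 coalesce into the order-2 block starting at #8.
 */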

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(buddy)))
		return 0;
#endif

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were   
 * free, the remainder of the region must be split into blocks.   
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.            
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	LIST_HEAD(list);
	list_add(&page->lru, &list);
	free_pages_bulk(zone, 1, &list, order);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	arch_free_page(page, order);
	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE<<order);

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	kernel_map_pages(page, 1 << order, 0);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
 	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
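
/*
 * Worked example: expand() called with low = 0 and high = 2 takes an
 * order-2 block of four pages, puts page[2] back on the order-1 free
 * list and page[1] back on the order-0 free list, and leaves page[0]
 * as the order-0 page returned to the caller.
 */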

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/* 
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area * area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		expand(zone, page, order, current_order, area);
		return page;
	}

	return NULL;
}

/* 
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order, 
			unsigned long count, struct list_head *list)
{
	int i;
	
	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order);
		if (unlikely(page == NULL))
			break;
		list_add_tail(&page->lru, list);
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the slab reaper to drain pagesets on a particular node that
 * belong to the currently executing processor.
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_node_pages(int nodeid)
{
	int i;
	enum zone_type z;
	unsigned long flags;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, smp_processor_id());
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			if (pcp->count) {
				local_irq_save(flags);
				free_pages_bulk(zone, pcp->count, &pcp->list, 0);
				pcp->count = 0;
				local_irq_restore(flags);
			}
		}
	}
}
#endif

#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			local_irq_save(flags);
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
			local_irq_restore(flags);
		}
	}
}
#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM

void mark_free_pages(struct zone *zone)
{
	unsigned long zone_pfn, flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long start_pfn, i;

			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));

			for (i=0; i < (1<<order); i++)
				SetPageNosaveFree(pfn_to_page(start_pfn+i));
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);	
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);	
}
#endif /* CONFIG_PM */

/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	arch_free_page(page, 0);

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	__count_vm_event(PGFREE);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}
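
/*
 * E.g., with hypothetical tunables pcp->high = 96 and pcp->batch = 32:
 * the freeing that takes pcp->count to 96 triggers free_pages_bulk(),
 * one batch of 32 pages goes back to the buddy lists, and 64 stay
 * cached on the per-cpu list.
 */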

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}
	
void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (= 1 << order) sub-pages: page[0] .. page[n-1].
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
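
/*
 * Illustrative usage sketch: after
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	split_page(page, 2);
 *
 * each of page[0]..page[3] carries its own reference and must be
 * freed individually with __free_page().
 */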

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu  = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp[cold];
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count += rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
			if (unlikely(!pcp->count))
				goto failed;
		}
		page = list_entry(pcp->list.next, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(zonelist, zone);
	local_irq_restore(flags);
	put_cpu();
Linus Torvalds's avatar
Linus Torvalds committed
828

Nick Piggin's avatar
Nick Piggin committed
829
	VM_BUG_ON(bad_range(zone, page));
Nick Piggin's avatar
Nick Piggin committed
830
	if (prep_new_page(page, order, gfp_flags))
Nick Piggin's avatar
Nick Piggin committed
831
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
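
/*
 * Example: a GFP_ATOMIC caller cannot sleep and passes __GFP_HIGH, so
 * __alloc_pages() below ends up using
 * ALLOC_WMARK_MIN | ALLOC_HARDER | ALLOC_HIGH, without ALLOC_CPUSET.
 */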
/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
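
/*
 * Worked example with illustrative numbers: order = 1, mark = 128,
 * lowmem_reserve = 0, no ALLOC_HIGH/ALLOC_HARDER.  With 200 free pages,
 * free_pages = 200 - 2 + 1 = 199 > 128, so the order-0 check passes;
 * the loop then subtracts the order-0 free pages, halves min to 64,
 * and the order-1 allocation succeeds only if enough order >= 1 pages
 * remain free.
 */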

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, int alloc_flags)
{
	struct zone **z = zonelist->zones;
	struct page *page = NULL;
	int classzone_idx = zone_idx(*z);
	struct zone *zone;

	/*
	 * Go through the zonelist once, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	do {
		zone = *z;
		if (unlikely((gfp_mask & __GFP_THISNODE) &&
			zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
				break;
		if ((alloc_flags & ALLOC_CPUSET) &&
				!cpuset_zone_allowed(zone, gfp_mask))
			continue;

		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			if (alloc_flags & ALLOC_WMARK_MIN)
				mark = zone->pages_min;
			else if (alloc_flags & ALLOC_WMARK_LOW)
				mark = zone->pages_low;
			else
				mark = zone->pages_high;
			if (!zone_watermark_ok(zone, order, mark,
				    classzone_idx, alloc_flags))
				if (!zone_reclaim_mode ||
				    !zone_reclaim(zone, gfp_mask, order))
					continue;
		}

		page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
		if (page) {
			break;
		}
	} while (*(++z) != NULL);
	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page * fastcall
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct zone **z;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int do_retry;
	int alloc_flags;
	int did_some_progress;

	might_sleep_if(wait);

restart:
	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */

	if (unlikely(*z == NULL)) {
		/* Should this ever happen?? */
		return NULL;
	}

	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
	if (page)
		goto got_pg;

	do {
		wakeup_kswapd(*z, order);
	} while (*(++z));

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 *
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags = ALLOC_WMARK_MIN;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	if (wait)
		alloc_flags |= ALLOC_CPUSET;

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
	if (page)
		goto got_pg;

	/* This allocation should allow future memory freeing. */

	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
			/* go through the zonelist yet again, ignoring mins */
			page = get_page_from_freelist(gfp_mask, order,
				zonelist, ALLOC_NO_WATERMARKS);
			if (page)
				goto got_pg;
			if (gfp_mask & __GFP_NOFAIL) {
				blk_congestion_wait(WRITE, HZ/50);
				goto nofail_alloc;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

rebalance:
	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);

	p->reclaim_state = NULL;
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (likely(did_some_progress)) {
		page = get_page_from_freelist(gfp_mask, order,
						zonelist, alloc_flags);
		if (page)
			goto got_pg;
	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */