/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>

#include <asm/tlbflush.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
unsigned long totalram_pages __read_mostly;
unsigned long totalhigh_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
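/*
 * Worked example (illustrative, not part of the original source): with
 * the default DMA ratio of 256 on the 1G machine above (16M dma, 784M
 * normal, 224M high), a HIGHMEM-capable allocation leaves roughly
 * (784M + 224M) / 256, i.e. about 4M, of ZONE_DMA untouched.
 */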

EXPORT_SYMBOL(totalram_pages);

/*
 * Used by page_zone() to look up the address of the struct zone whose
 * id is encoded in the upper bits of page->flags
 */
struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);

static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
int min_free_kbytes = 1024;

unsigned long __initdata nr_kernel_pages;
unsigned long __initdata nr_all_pages;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}

#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
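/*
 * Illustrative layout (an example, not from the original source): after
 * prep_compound_page(page, 2), all four pages have PG_compound set and
 * page_private() equal to (unsigned long)page, while page[1].lru.next
 * holds free_compound_page and page[1].lru.prev holds the order, 2.
 */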

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].lru.next = (void *)free_compound_page;	/* set dtor */
	page[1].lru.prev = (void *)order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageCompound(p);
		set_page_private(p, (unsigned long)page);
	}
}

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely((unsigned long)page[1].lru.prev != order))
		bad_page(page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageCompound(p) |
				(page_private(p) != (unsigned long)page)))
			bad_page(page);
		__ClearPageCompound(p);
	}
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * function for dealing with page's order in buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page) {
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order) {
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
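/*
 * Worked example (illustrative): for page_idx 8 at order 1, the buddy
 * index is 8 ^ (1 << 1) = 10 and the combined order-2 index is
 * 8 & ~(1 << 1) = 8; starting from page_idx 10 instead, the same
 * formulas give buddy 8 and combined index 8, so both halves agree on
 * their order-2 parent.
 */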

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif

	if (PageBuddy(page) && page_order(page) == order) {
		BUG_ON(page_count(page) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were   
 * free, the remainder of the region must be split into blocks.   
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.            
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	BUG_ON(page_idx & (order_size - 1));
	BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}
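/*
 * Illustrative trace (not from the original source): freeing an order-0
 * page at index 5 whose buddy at index 4 is free produces an order-1
 * block at index 4; if its order-1 buddy at index 6 is also free, the
 * loop runs again and an order-2 block at index 4 is placed on
 * free_area[2].free_list.
 */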

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages. 
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	LIST_HEAD(list);
	list_add(&page->lru, &list);
	free_pages_bulk(zone, 1, &list, order);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	arch_free_page(page, order);
	if (!PageHighMem(page))
		mutex_debug_check_no_locks_freed(page_address(page),
						 PAGE_SIZE<<order);

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	kernel_map_pages(page, 1 << order, 0);
	local_irq_save(flags);
	__mod_page_state(pgfree, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
 	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
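/*
 * Worked example (illustrative): expand(zone, page, 0, 3, area) carves
 * an order-3 block into an order-2 remainder at page[4], an order-1
 * remainder at page[2] and an order-0 remainder at page[1], leaving
 * page[0] for the caller to hand out for the order-0 request.
 */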

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/* 
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area * area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		expand(zone, page, order, current_order, area);
		return page;
	}

	return NULL;
}

/* 
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order, 
			unsigned long count, struct list_head *list)
{
	int i;
	
	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order);
		if (unlikely(page == NULL))
			break;
		list_add_tail(&page->lru, list);
	}
	spin_unlock(&zone->lock);
	return i;
}
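/*
 * Illustrative cost note (not from the original source): refilling a
 * per-cpu list with batch = 16 order-0 pages via rmqueue_bulk() takes
 * one zone->lock round trip instead of sixteen.
 */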

#ifdef CONFIG_NUMA
/*
 * Called from the slab reaper to drain pagesets on a particular node that
 * belong to the currently executing processor.
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_node_pages(int nodeid)
{
	int i, z;
	unsigned long flags;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, smp_processor_id());
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			if (pcp->count) {
				local_irq_save(flags);
				free_pages_bulk(zone, pcp->count, &pcp->list, 0);
				pcp->count = 0;
				local_irq_restore(flags);
			}
		}
	}
}
#endif

#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			local_irq_save(flags);
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
			local_irq_restore(flags);
		}
	}
}
#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM

void mark_free_pages(struct zone *zone)
{
	unsigned long zone_pfn, flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long start_pfn, i;

			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));

			for (i=0; i < (1<<order); i++)
				SetPageNosaveFree(pfn_to_page(start_pfn+i));
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);	
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);	
}
#endif /* CONFIG_PM */

static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
{
#ifdef CONFIG_NUMA
	pg_data_t *pg = z->zone_pgdat;
	pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
	struct per_cpu_pageset *p;

	p = zone_pcp(z, cpu);
	if (pg == orig) {
		p->numa_hit++;
	} else {
		p->numa_miss++;
		zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
	}
	if (pg == NODE_DATA(numa_node_id()))
		p->local_node++;
	else
		p->other_node++;
#endif
}

/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	arch_free_page(page, 0);

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	__inc_page_state(pgfree);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}
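/*
 * Worked example (illustrative): with pcp->high = 96 and pcp->batch = 32,
 * the free that brings pcp->count to 96 returns 32 pages to the buddy
 * lists in one bulk operation, leaving 64 cached on the per-cpu list.
 */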

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}
	
void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	BUG_ON(PageCompound(page));
	BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
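/*
 * Usage sketch (illustrative, not from the original source):
 *
 *	struct page *p = alloc_pages(GFP_KERNEL, 2);	.. 4 contiguous pages
 *	split_page(p, 2);
 *	__free_page(p + 3);	.. each sub-page may now be freed on its own
 */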

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu  = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp[cold];
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count += rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
			if (unlikely(!pcp->count))
				goto failed;
		}
		page = list_entry(pcp->list.next, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__mod_page_state_zone(zone, pgalloc, 1 << order);
	zone_statistics(zonelist, zone, cpu);
	local_irq_restore(flags);
	put_cpu();

	BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
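/*
 * Illustrative combination (not from the original source): a GFP_ATOMIC
 * caller (!wait, __GFP_HIGH) ends up with
 * ALLOC_WMARK_MIN | ALLOC_HARDER | ALLOC_HIGH | ALLOC_CPUSET, which
 * zone_watermark_ok() turns into a requirement of roughly 3/8 of
 * pages_min.
 */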

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
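/*
 * Worked example (illustrative): for mark = 128 and an order-2 request,
 * a GFP_ATOMIC caller checks against 128 - 64 - 16 = 48 pages plus the
 * zone's lowmem_reserve; the loop then discounts order-0 and order-1
 * free pages and halves the remaining requirement at each step.
 */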

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, int alloc_flags)
{
	struct zone **z = zonelist->zones;
	struct page *page = NULL;
	int classzone_idx = zone_idx(*z);

	/*
	 * Go through the zonelist once, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	do {
		if ((alloc_flags & ALLOC_CPUSET) &&
				!cpuset_zone_allowed(*z, gfp_mask))
			continue;

		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			if (alloc_flags & ALLOC_WMARK_MIN)
				mark = (*z)->pages_min;
			else if (alloc_flags & ALLOC_WMARK_LOW)
				mark = (*z)->pages_low;
			else
				mark = (*z)->pages_high;
			if (!zone_watermark_ok(*z, order, mark,
				    classzone_idx, alloc_flags))
				if (!zone_reclaim_mode ||
				    !zone_reclaim(*z, gfp_mask, order))
					continue;
		}

		page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
		if (page) {
			break;
		}
	} while (*(++z) != NULL);
	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page * fastcall
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct zone **z;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int do_retry;
	int alloc_flags;
	int did_some_progress;

	might_sleep_if(wait);

restart:
	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */

	if (unlikely(*z == NULL)) {
		/* Should this ever happen?? */
		return NULL;
	}

	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
	if (page)
		goto got_pg;

	do {
		if (cpuset_zone_allowed(*z, gfp_mask))
			wakeup_kswapd(*z, order);
	} while (*(++z));

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 *
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags = ALLOC_WMARK_MIN;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	alloc_flags |= ALLOC_CPUSET;

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
	if (page)
		goto got_pg;

	/* This allocation should allow future memory freeing. */

	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
			/* go through the zonelist yet again, ignoring mins */
			page = get_page_from_freelist(gfp_mask, order,
				zonelist, ALLOC_NO_WATERMARKS);
			if (page)
				goto got_pg;
			if (gfp_mask & __GFP_NOFAIL) {
				blk_congestion_wait(WRITE, HZ/50);
				goto nofail_alloc;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

rebalance:
	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);

	p->reclaim_state = NULL;
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (likely(did_some_progress)) {
		page = get_page_from_freelist(gfp_mask, order,
						zonelist, alloc_flags);
		if (page)
			goto got_pg;
	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */
		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
		if (page)
			goto got_pg;

		out_of_memory(zonelist, gfp_mask, order);
		goto restart;
	}

	/*
	 * Don't let big-order allocations loop unless the caller explicitly
	 * requests that.  Wait for some write requests to complete then retry.
	 *
	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
	 * <= 3, but that may not be true in other implementations.
	 */
	do_retry = 0;
	if (!(gfp_mask & __GFP_NORETRY)) {
		if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
			do_retry = 1;
		if (gfp_mask & __GFP_NOFAIL)
			do_retry = 1;
	}
	if (do_retry) {
		blk_congestion_wait(WRITE, HZ/50);
		goto rebalance;
	}

nopage:
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
		printk(KERN_WARNING "%s: page allocation failure."
			" order:%d, mode:0x%x\n",
			p->comm, order, gfp_mask);
		dump_stack();
		show_mem();
	}
got_pg:
	return page;
}

EXPORT_SYMBOL(__alloc_pages);

/*
 * Common helper functions.
 */
fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page * page;
	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}

EXPORT_SYMBOL(__get_free_pages);

fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	struct page * page;

	/*
	 * get_zeroed_page() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
	if (page)
		return (unsigned long) page_address(page);
	return 0;
}

EXPORT_SYMBOL(get_zeroed_page);

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0)
		free_hot_cold_page(pvec->pages[i], pvec->cold);
}

fastcall void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {