/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>

#include <asm/tlbflush.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
struct pglist_data *pgdat_list __read_mostly;
unsigned long totalram_pages __read_mostly;
unsigned long totalhigh_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

static void fastcall free_hot_cold_page(struct page *page, int cold);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
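/*
 * Illustrative sketch (not a definitive formula) of how these ratios
 * become the per-zone lowmem_reserve[] arrays; the real computation
 * lives in setup_per_zone_lowmem_reserve() later in this file:
 *
 *	zone[i]->lowmem_reserve[j] =
 *		(pages in zones i+1 .. j) / sysctl_lowmem_reserve_ratio[i]
 *
 * e.g. on the hypothetical 1G machine above, an allocation allowed to
 * use ZONE_HIGHMEM leaves (224M+784M)/256 of ZONE_DMA out of its reach.
 */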

EXPORT_SYMBOL(totalram_pages);

/*
 * Used by page_zone() to look up the address of the struct zone whose
 * id is encoded in the upper bits of page->flags
 */
struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);

static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
int min_free_kbytes = 1024;

unsigned long __initdata nr_kernel_pages;
unsigned long __initdata nr_all_pages;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}

#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].lru.next = NULL;			/* set dtor */
	page[1].lru.prev = (void *)order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		SetPageCompound(p);
		set_page_private(p, (unsigned long)page);
	}
}
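/*
 * Example layout after prep_compound_page(page, 2), i.e. a 4-page
 * compound page (illustration only):
 *
 *	page[0..3]:		PG_compound set, page_private() == &page[0]
 *	page[1].lru.next:	the destructor (NULL above, the default)
 *	page[1].lru.prev:	(void *)2, the allocation order
 */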

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely((unsigned long)page[1].lru.prev != order))
		bad_page(page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageCompound(p) |
				(page_private(p) != (unsigned long)page)))
			bad_page(page);
		ClearPageCompound(p);
	}
}

/*
 * Functions for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPagePrivate(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPagePrivate(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
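/*
 * Worked example for the two helpers above, with hypothetical indices:
 * freeing page_idx 8 at order 1, __page_find_buddy() locates buddy_idx
 * 8 ^ (1 << 1) = 10; if the two merge, __find_combined_index() yields
 * 8 & ~(1 << 1) = 8, the start of the combined order-2 block.
 */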

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is free &&
 * (c) the buddy is on the buddy system &&
 * (d) a page and its buddy have the same order.
 * For recording a page's order, we use page_private(page) and PG_private.
 *
 */
static inline int page_is_buddy(struct page *page, int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif

       if (PagePrivate(page)           &&
           (page_order(page) == order) &&
            page_count(page) == 0)
               return 1;
       return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length (1 << order) and marked with PG_private.  A page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were   
 * free, the remainder of the region must be split into blocks.   
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.            
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	BUG_ON(page_idx & (order_size - 1));
	BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}
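/*
 * Illustrative trace of the loop above, assuming a hypothetical zone
 * state: freeing an order-0 page at idx 4 while idx 5 is free merges
 * them into an order-1 block at idx 4; if an order-1 block at idx 6
 * (4 ^ (1 << 1)) is free too, another pass merges into an order-2
 * block at idx 4, and so on up to MAX_ORDER-1.
 */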

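/*
 * Sanity-check a page being returned to the allocator, reporting via
 * bad_page() if it still looks in use.  Returns nonzero when
 * PG_reserved was set, in which case the caller must not free the page.
 */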
static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages. 
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	LIST_HEAD(list);
	list_add(&page->lru, &list);
	free_pages_bulk(zone, 1, &list, order);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	arch_free_page(page, order);
	if (!PageHighMem(page))
		mutex_debug_check_no_locks_freed(page_address(page),
						 PAGE_SIZE<<order);

#ifndef CONFIG_MMU
	for (i = 1 ; i < (1 << order) ; ++i)
		__put_page(page + i);
#endif

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	kernel_map_pages(page, 1 << order, 0);
	local_irq_save(flags);
	__mod_page_state(pgfree, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);

		free_hot_cold_page(page, 0);
	} else {
		LIST_HEAD(list);
		int loop;

		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 16 < BITS_PER_LONG)
				prefetchw(p + 16);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		arch_free_page(page, order);

		mod_page_state(pgfree, 1 << order);

		list_add(&page->lru, &list);
		kernel_map_pages(page, 1 << order, 0);
		free_pages_bulk(page_zone(page), 1, &list, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
 	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
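/*
 * Worked example for a hypothetical request: carving an order-0 page
 * out of an order-3 block of 8 pages, expand() hands back page[0] and
 * queues page[4] on the order-2 free list, page[2] on order-1 and
 * page[1] on order-0.
 */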

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refs(page, order);
	kernel_map_pages(page, 1 << order, 1);
	return 0;
}

/* 
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area * area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		expand(zone, page, order, current_order, area);
		return page;
	}

	return NULL;
}

/* 
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order, 
			unsigned long count, struct list_head *list)
{
	int i;
	
	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order);
		if (unlikely(page == NULL))
			break;
		list_add_tail(&page->lru, list);
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/* Called from the slab reaper to drain remote pagesets */
void drain_remote_pages(void)
{
	struct zone *zone;
	int i;
	unsigned long flags;

	local_irq_save(flags);
	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		/* Do not drain local pagesets */
		if (zone->zone_pgdat->node_id == numa_node_id())
			continue;

		pset = zone_pcp(zone, smp_processor_id());
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
		}
	}
	local_irq_restore(flags);
}
#endif

#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			local_irq_save(flags);
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
			local_irq_restore(flags);
		}
	}
}
#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM

void mark_free_pages(struct zone *zone)
{
	unsigned long zone_pfn, flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long start_pfn, i;

			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));

			for (i=0; i < (1<<order); i++)
				SetPageNosaveFree(pfn_to_page(start_pfn+i));
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);	
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);	
}
#endif /* CONFIG_PM */

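/*
 * Update the allocating CPU's NUMA counters: numa_hit when the page
 * came from the preferred (first) zone in the zonelist, otherwise
 * numa_miss here plus numa_foreign on the preferred zone; local_node
 * vs other_node records whether the page is on this CPU's node.
 */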
static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
{
#ifdef CONFIG_NUMA
	pg_data_t *pg = z->zone_pgdat;
	pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
	struct per_cpu_pageset *p;

	p = zone_pcp(z, cpu);
	if (pg == orig) {
		p->numa_hit++;
	} else {
		p->numa_miss++;
		zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
	}
	if (pg == NODE_DATA(numa_node_id()))
		p->local_node++;
	else
		p->other_node++;
#endif
}

/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	arch_free_page(page, 0);

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	__inc_page_state(pgfree);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}
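/*
 * On the hot/cold split above: pcp[0] holds cache-hot pages and pcp[1]
 * cache-cold ones.  Pages are pushed on the list head, and once count
 * reaches pcp->high a batch of pcp->batch pages is drained back to the
 * buddy lists.
 */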

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}
	
void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	for(i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu  = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp[cold];
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count += rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
			if (unlikely(!pcp->count))
				goto failed;
		}
		page = list_entry(pcp->list.next, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__mod_page_state_zone(zone, pgalloc, 1 << order);
	zone_statistics(zonelist, zone, cpu);
	local_irq_restore(flags);
	put_cpu();

	BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order))
		goto again;

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}
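/*
 * Note that order-0 requests are served entirely from the per-cpu page
 * lists, refilled via rmqueue_bulk() under a single grab of zone->lock,
 * while higher-order requests always take zone->lock and call
 * __rmqueue() directly.
 */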

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
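/*
 * For example (see __alloc_pages() below), a GFP_ATOMIC caller - !wait
 * plus __GFP_HIGH - allocates with ALLOC_WMARK_MIN | ALLOC_HARDER |
 * ALLOC_HIGH | ALLOC_CPUSET, so it may dip well below pages_min before
 * failing.
 */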

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
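/*
 * Worked example with hypothetical numbers: for an order-1 request with
 * mark = 128 and ALLOC_HIGH|ALLOC_HARDER set, min shrinks to
 * 128 - 64 - 16 = 48.  The zone passes only if free_pages (less the
 * pages this allocation consumes) exceeds 48 + lowmem_reserve, and if,
 * discounting every order-0 page, the remaining free pages still
 * exceed 48 >> 1 = 24.
 */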

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, int alloc_flags)
{
	struct zone **z = zonelist->zones;
	struct page *page = NULL;
	int classzone_idx = zone_idx(*z);

	/*
	 * Go through the zonelist once, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	do {
		if ((alloc_flags & ALLOC_CPUSET) &&
				!cpuset_zone_allowed(*z, gfp_mask))
			continue;

		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			if (alloc_flags & ALLOC_WMARK_MIN)
				mark = (*z)->pages_min;
			else if (alloc_flags & ALLOC_WMARK_LOW)
				mark = (*z)->pages_low;
			else
				mark = (*z)->pages_high;
			if (!zone_watermark_ok(*z, order, mark,
				    classzone_idx, alloc_flags))
				if (!zone_reclaim_mode ||
				    !zone_reclaim(*z, gfp_mask, order))
					continue;
		}

		page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
		if (page) {
			break;
		}
	} while (*(++z) != NULL);
	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
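/*
 * Rough flow of the function below: try the freelists at the low
 * watermark; wake kswapd and retry at the adjusted min watermark;
 * let PF_MEMALLOC/TIF_MEMDIE callers bypass watermarks entirely;
 * then, for __GFP_WAIT callers, direct-reclaim and retry, possibly
 * invoking the OOM killer, before finally giving up with a warning.
 */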
struct page * fastcall
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct zone **z;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int do_retry;
	int alloc_flags;
	int did_some_progress;

	might_sleep_if(wait);

restart:
	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */

	if (unlikely(*z == NULL)) {
		/* Should this ever happen?? */
		return NULL;
	}

	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
	if (page)
		goto got_pg;

	do {
		wakeup_kswapd(*z, order);
	} while (*(++z));

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 *
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags = ALLOC_WMARK_MIN;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	alloc_flags |= ALLOC_CPUSET;

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
	if (page)
		goto got_pg;

	/* This allocation should allow future memory freeing. */

	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
			/* go through the zonelist yet again, ignoring mins */
			page = get_page_from_freelist(gfp_mask, order,
				zonelist, ALLOC_NO_WATERMARKS);
			if (page)
				goto got_pg;
			if (gfp_mask & __GFP_NOFAIL) {
				blk_congestion_wait(WRITE, HZ/50);
				goto nofail_alloc;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

rebalance:
	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);

	p->reclaim_state = NULL;
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (likely(did_some_progress)) {
		page = get_page_from_freelist(gfp_mask, order,
						zonelist, alloc_flags);
		if (page)
			goto got_pg;
	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */
		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
		if (page)
			goto got_pg;

		out_of_memory(gfp_mask, order);
		goto restart;
	}

	/*
	 * Don't let big-order allocations loop unless the caller explicitly
	 * requests that.  Wait for some write requests to complete then retry.
	 *
	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
	 * <= 3, but that may not be true in other implementations.
	 */
	do_retry = 0;
	if (!(gfp_mask & __GFP_NORETRY)) {
		if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
			do_retry = 1;
		if (gfp_mask & __GFP_NOFAIL)
			do_retry = 1;
	}
	if (do_retry) {
		blk_congestion_wait(WRITE, HZ/50);
		goto rebalance;
	}

nopage:
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
		printk(KERN_WARNING "%s: page allocation failure."
			" order:%d, mode:0x%x\n",
			p->comm, order, gfp_mask);
		dump_stack();
		show_mem();
	}
got_pg:
	return page;
}

EXPORT_SYMBOL(__alloc_pages);

/*
 * Common helper functions.
 */
fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page * page;
	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}

EXPORT_SYMBOL(__get_free_pages);
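/*
 * Typical usage (illustration):
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);
 *	if (addr) {
 *		...
 *		free_pages(addr, 1);
 *	}
 *
 * Since a kernel virtual address is returned, __GFP_HIGHMEM makes no
 * sense here; use alloc_pages() if the memory may come from highmem.
 */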

fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	struct page * page;

	/*
	 * get_zeroed_page() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
	if (page)
		return (unsigned long) page_address(page);
	return 0;
}

EXPORT_SYMBOL(get_zeroed_page);

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0)
		free_hot_cold_page(pvec->pages[i], pvec->cold);
}

fastcall void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

fastcall void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);

/*
 * Total amount of free (allocatable) RAM:
 */
unsigned int nr_free_pages(void)
{
	unsigned int sum = 0;
	struct zone *zone;

	for_each_zone(zone)
		sum += zone->free_pages;

	return sum;
}

EXPORT_SYMBOL(nr_free_pages);

#ifdef CONFIG_NUMA
unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
{
	unsigned int i, sum = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		sum += pgdat->node_zones[i].free_pages;

	return sum;
}
#endif

static unsigned int nr_free_zone_pages(int offset)
{
	/* Just pick one node, since fallback list is circular */
	pg_data_t *pgdat = NODE_DATA(numa_node_id());
	unsigned int sum = 0;

	struct zonelist *zonelist = pgdat->node_zonelists + offset;
	struct zone **zonep = zonelist->zones;
	struct zone *zone;

	for (zone = *zonep++; zone; zone = *zonep++) {
		unsigned long size = zone->present_pages;
		unsigned long high = zone->pages_high;
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}

/*
 * Amount of free RAM allocatable within all zones
 */
unsigned int nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
}

#ifdef CONFIG_HIGHMEM
unsigned int nr_free_highpages (void)
{
	pg_data_t *pgdat;
	unsigned int pages = 0;

	for_each_pgdat(pgdat)
		pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;

	return pages;
}
#endif

#ifdef CONFIG_NUMA
static void show_node(struct zone *zone)
{
	printk("Node %d ", zone->zone_pgdat->node_id);
}
#else
#define show_node(zone)	do { } while (0)
#endif

/*
 * Accumulate the page_state information across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
static DEFINE_PER_CPU(struct page_state, page_states) = {0};

atomic_t nr_pagecache = ATOMIC_INIT(0);
EXPORT_SYMBOL(nr_pagecache);
#ifdef CONFIG_SMP
DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
#endif

static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
	int cpu = 0;

	memset(ret, 0, nr * sizeof(unsigned long));
	cpus_and(*cpumask, *cpumask, cpu_online_map);

	cpu = first_cpu(*cpumask);
	while (cpu < NR_CPUS) {
		unsigned long *in, *out, o