/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
	 256,
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32
#endif
};
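/*
 * Each ratio divides the pages of all higher zones to get the number of a
 * zone's own pages held back from those allocations, so a smaller ratio
 * means a larger reservation (cf. the 784M/256 and 224M/32 figures above).
 */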

EXPORT_SYMBOL(totalram_pages);

/*
 * Used by page_zone() to look up the address of the struct zone whose
 * id is encoded in the upper bits of page->flags
 */
struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);

static char *zone_names[MAX_NR_ZONES] = {
	 "DMA",
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem"
#endif
};

int min_free_kbytes = 1024;

unsigned long __meminitdata nr_kernel_pages;
unsigned long __meminitdata nr_all_pages;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].lru.next = (void *)free_compound_page;	/* set dtor */
	page[1].lru.prev = (void *)order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageCompound(p);
		set_page_private(p, (unsigned long)page);
	}
}

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely((unsigned long)page[1].lru.prev != order))
		bad_page(page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageCompound(p) |
				(page_private(p) != (unsigned long)page)))
			bad_page(page);
		__ClearPageCompound(p);
	}
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * Functions for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these.
 * So, we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}
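
/*
 * Example, following the equations above: freeing page 8 at order 1
 * yields buddy_idx = 8 ^ (1 << 1) = 10, and if the two merge, the
 * combined order-2 block starts at __find_combined_index(8, 1)
 * = 8 & ~(1 << 1) = 8.
 */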

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(buddy)))
		return 0;
#endif

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PG_buddy. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were   
 * free, the remainder of the region must be split into blocks.   
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.            
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & (order_size - 1));
	VM_BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(page, buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages. 
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		VM_BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	__free_one_page(page, zone, order);
	spin_unlock(&zone->lock);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	arch_free_page(page, order);
	if (!PageHighMem(page))
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE<<order);

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	kernel_map_pages(page, 1 << order, 0);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}
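
/*
 * Note: the order > 0 path walks exactly BITS_PER_LONG pages, so it relies
 * on the bootmem allocator passing 1 << order == BITS_PER_LONG, i.e. one
 * bitmap word's worth of pages per call.
 */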


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
 	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
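
/*
 * Example: expand(zone, page, 0, 3, area) splits an order-3 block down to
 * the requested order 0: page[4] goes to the order-2 free list, page[2]
 * to order-1, page[1] to order-0, and page[0] is left for the caller
 * (__rmqueue() returns it).
 */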

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/* 
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area * area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		expand(zone, page, order, current_order, area);
		return page;
	}

	return NULL;
}

/* 
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order, 
			unsigned long count, struct list_head *list)
{
	int i;
	
	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order);
		if (unlikely(page == NULL))
			break;
		list_add_tail(&page->lru, list);
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the slab reaper to drain pagesets on a particular node that
 * belongs to the currently executing processor.
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_node_pages(int nodeid)
{
	int i;
	enum zone_type z;
	unsigned long flags;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
		struct per_cpu_pageset *pset;

		if (!populated_zone(zone))
			continue;

		pset = zone_pcp(zone, smp_processor_id());
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			if (pcp->count) {
				local_irq_save(flags);
				free_pages_bulk(zone, pcp->count, &pcp->list, 0);
				pcp->count = 0;
				local_irq_restore(flags);
			}
		}
	}
}
#endif

#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			local_irq_save(flags);
Nick Piggin's avatar
Nick Piggin committed
685
686
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
			local_irq_restore(flags);
		}
	}
}
#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM

void mark_free_pages(struct zone *zone)
{
	unsigned long zone_pfn, flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long start_pfn, i;

			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));

			for (i=0; i < (1<<order); i++)
				SetPageNosaveFree(pfn_to_page(start_pfn+i));
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);	
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);	
}
#endif /* CONFIG_PM */

/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	arch_free_page(page, 0);

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	__count_vm_event(PGFREE);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}
	
void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}
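
/*
 * Note: the 'cold' argument only selects which per-cpu list the page is
 * queued on; pcp[0] holds cache-hot pages and pcp[1] cache-cold ones, and
 * buffered_rmqueue() later serves __GFP_COLD requests from the cold list.
 */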

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n-1]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
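
/*
 * For instance, a caller that did alloc_pages(GFP_KERNEL, 2) (no
 * __GFP_COMP, so the page is not compound) may split_page(page, 2) and
 * then release the four order-0 pages individually with __free_page().
 */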

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp[cold];
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count += rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
			if (unlikely(!pcp->count))
				goto failed;
		}
		page = list_entry(pcp->list.next, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(zonelist, zone);
	local_irq_restore(flags);
	put_cpu();

	VM_BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
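/*
 * e.g. a GFP_ATOMIC caller (__GFP_HIGH, no __GFP_WAIT) ends up with
 * ALLOC_WMARK_MIN | ALLOC_HARDER | ALLOC_HIGH in __alloc_pages() below,
 * letting it eat further into the reserve pages.
 */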

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
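
/*
 * Example: an order-2 request against mark = 1024 first needs 1024 +
 * lowmem_reserve free pages in total, then at least 512 free pages in
 * blocks of order >= 1 and 256 in blocks of order >= 2, since each pass
 * subtracts the lower order's pages and halves min.
 */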

Rohit Seth's avatar
Rohit Seth committed
884
885
886
887
888
889
890
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, int alloc_flags)
{
	struct zone **z = zonelist->zones;
	struct page *page = NULL;
	int classzone_idx = zone_idx(*z);
	struct zone *zone;

	/*
	 * Go through the zonelist once, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	do {
		zone = *z;
		if (unlikely((gfp_mask & __GFP_THISNODE) &&
			zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
				break;
		if ((alloc_flags & ALLOC_CPUSET) &&
				!cpuset_zone_allowed(zone, gfp_mask))
			continue;

		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			if (alloc_flags & ALLOC_WMARK_MIN)
				mark = zone->pages_min;
			else if (alloc_flags & ALLOC_WMARK_LOW)
				mark = zone->pages_low;
			else
				mark = zone->pages_high;
			if (!zone_watermark_ok(zone, order, mark,
				    classzone_idx, alloc_flags))
				if (!zone_reclaim_mode ||
				    !zone_reclaim(zone, gfp_mask, order))
					continue;
		}

		page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
		if (page) {
			break;
		}
	} while (*(++z) != NULL);
	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page * fastcall
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct zone **z;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int do_retry;
	int alloc_flags;
	int did_some_progress;

	might_sleep_if(wait);

restart:
	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */

	if (unlikely(*z == NULL)) {
		/* Should this ever happen?? */
		return NULL;
	}

	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
	if (page)
		goto got_pg;

	do {
		wakeup_kswapd(*z, order);
	} while (*(++z));

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 *
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags = ALLOC_WMARK_MIN;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	if (wait)
		alloc_flags |= ALLOC_CPUSET;

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
	if (page)
		goto got_pg;

	/* This allocation should allow future memory freeing. */

	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
			/* go through the zonelist yet again, ignoring mins */
			page = get_page_from_freelist(gfp_mask, order,
				zonelist, ALLOC_NO_WATERMARKS);
			if (page)
				goto got_pg;
			if (gfp_mask & __GFP_NOFAIL) {
				blk_congestion_wait(WRITE, HZ/50);
				goto nofail_alloc;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

rebalance:
	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);

	p->reclaim_state = NULL;
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (likely(did_some_progress)) {
		page = get_page_from_freelist(gfp_mask, order,
						zonelist, alloc_flags);
		if (page)
			goto got_pg;
	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */
		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
		if (page)
			goto got_pg;

		out_of_memory(zonelist, gfp_mask, order);
		goto restart;
	}

	/*
	 * Don't let big-order allocations loop unless the caller explicitly
	 * requests that.  Wait for some write requests to complete then retry.
	 *
	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
	 * <= 3, but that may not be true in other implementations.
	 */
	do_retry = 0;
	if (!(gfp_mask & __GFP_NORETRY)) {
		if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
			do_retry = 1;
		if (gfp_mask & __GFP_NOFAIL)
			do_retry = 1;
	}
	if (do_retry) {
		blk_congestion_wait(WRITE, HZ/50);
		goto rebalance;
	}

nopage:
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
		printk(KERN_WARNING "%s: page allocation failure."
			" order:%d, mode:0x%x\n",
			p->comm, order, gfp_mask);
		dump_stack();
		show_mem();
	}
got_pg:
	return page;
}

EXPORT_SYMBOL(__alloc_pages);
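
/*
 * Typical entry is via the alloc_pages()/alloc_pages_node() wrappers in
 * <linux/gfp.h>, which pick the zonelist for the requesting node, roughly:
 * __alloc_pages(gfp_mask, order,
 *		NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
 */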

/*
 * Common helper functions.
 */
fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page * page;
	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}

EXPORT_SYMBOL(__get_free_pages);

fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	struct page * page;

	/*
	 * get_zeroed_page() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
	if (page)
		return (unsigned long) page_address(page);
	return 0;
}

EXPORT_SYMBOL(get_zeroed_page);

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0)
		free_hot_cold_page(pvec->pages[i], pvec->cold);
}

fastcall void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

fastcall void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);

/*
 * Total amount of free (allocatable) RAM:
 */
unsigned int nr_free_pages(void)
{
	unsigned int sum = 0;
	struct zone *zone;

	for_each_zone(zone)
		sum += zone->free_pages;

	return sum;
}

EXPORT_SYMBOL(nr_free_pages);

#ifdef CONFIG_NUMA
unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
{
	unsigned int sum = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		sum += pgdat->node_zones[i].free_pages;

	return sum;
}
#endif

static unsigned int nr_free_zone_pages(int offset)
{
	/* Just pick one node, since fallback list is circular */
	pg_data_t *pgdat = NODE_DATA(numa_node_id());
	unsigned int sum = 0;

	struct zonelist *zonelist = pgdat->node_zonelists + offset;
	struct zone **zonep = zonelist->zones;
	struct zone *zone;

	for (zone = *zonep++; zone; zone = *zonep++) {
		unsigned long size = zone->present_pages;
		unsigned long high = zone->pages_high;
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{