/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/mempolicy.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/*
 * MCD - HACK: Find somewhere to initialize this EARLY, or make this
 * initializer cleaner
 */
nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
EXPORT_SYMBOL(node_online_map);
nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
EXPORT_SYMBOL(node_possible_map);
unsigned long totalram_pages __read_mostly;
unsigned long totalhigh_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
long nr_swap_pages;
int percpu_pagelist_fraction;

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };

EXPORT_SYMBOL(totalram_pages);

/*
 * Used by page_zone() to look up the address of the struct zone whose
 * id is encoded in the upper bits of page->flags
 */
struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
EXPORT_SYMBOL(zone_table);

static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
int min_free_kbytes = 1024;

unsigned long __initdata nr_kernel_pages;
unsigned long __initdata nr_all_pages;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}

#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	printk(KERN_EMERG "Bad page state in process '%s'\n"
		KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
		KERN_EMERG "Backtrace:\n",
		current->comm, page, (int)(2*sizeof(unsigned long)),
		(unsigned long)page->flags, page->mapping,
		page_mapcount(page), page_count(page));
	dump_stack();
	page->flags &= ~(1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim |
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_buddy );
	set_page_count(page, 0);
	reset_page_mapcount(page);
	page->mapping = NULL;
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, (unsigned long)page[1].lru.prev);
}

static void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	page[1].lru.next = (void *)free_compound_page;	/* set dtor */
	page[1].lru.prev = (void *)order;
	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageCompound(p);
		set_page_private(p, (unsigned long)page);
	}
}
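
/*
 * Illustrative sketch (not part of the original source): after
 * prep_compound_page(page, 2) on a 4K-page system, the 16K compound
 * block looks like:
 *
 *	page[0]: PG_compound set, page_private = head page (itself)
 *	page[1]: PG_compound set, page_private = head page,
 *		 lru.next = free_compound_page (dtor), lru.prev = 2 (order)
 *	page[2..3]: PG_compound set, page_private = head page
 *
 * destroy_compound_page() below checks exactly this structure before
 * tearing it down.
 */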

static void destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	if (unlikely((unsigned long)page[1].lru.prev != order))
		bad_page(page);

	for (i = 0; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageCompound(p) |
				(page_private(p) != (unsigned long)page)))
			bad_page(page);
		__ClearPageCompound(p);
	}
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

/*
 * Functions for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these, so we don't
 * need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	return page_private(page);
}

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 * Continuing the example, order 1 buddies #8 and #10 share the
 * order 2 parent #8:
 *     P = 10 & ~(1 << 1) = 10 & ~2 = 8
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline struct page *
__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
{
	unsigned long buddy_idx = page_idx ^ (1 << order);

	return page + (buddy_idx - page_idx);
}

static inline unsigned long
__find_combined_index(unsigned long page_idx, unsigned int order)
{
	return (page_idx & ~(1 << order));
}

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce with. A page and its buddy can be coalesced if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order.
 *
 * For recording whether a page is in the buddy system, we use PG_buddy.
 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, int order)
{
#ifdef CONFIG_HOLES_IN_ZONE
	if (!pfn_valid(page_to_pfn(page)))
		return 0;
#endif

	if (PageBuddy(page) && page_order(page) == order) {
		BUG_ON(page_count(page) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with PG_buddy. The page's
 * order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block and both buddies were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */
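
/*
 * Worked example (illustrative, not from the original comment): freeing
 * page #8 at order 0 while page #9 is already free and #10-#11 are busy:
 *
 *	buddy = 8 ^ (1 << 0) = 9	(free, order 0 -> coalesce)
 *	combined_idx = 8 & ~(1 << 0) = 8	(now an order-1 block at #8)
 *	buddy = 8 ^ (1 << 1) = 10	(busy -> stop)
 *
 * The order-1 block at #8 ends up on zone->free_area[1].free_list.
 */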

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order)
{
	unsigned long page_idx;
	int order_size = 1 << order;

	if (unlikely(PageCompound(page)))
		destroy_compound_page(page, order);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	BUG_ON(page_idx & (order_size - 1));
	BUG_ON(bad_range(zone, page));

	zone->free_pages += order_size;
	while (order < MAX_ORDER-1) {
		unsigned long combined_idx;
		struct free_area *area;
		struct page *buddy;

		buddy = __page_find_buddy(page, page_idx, order);
		if (!page_is_buddy(buddy, order))
			break;		/* Move the buddy up one level. */

		list_del(&buddy->lru);
		area = zone->free_area + order;
		area->nr_free--;
		rmv_page_order(buddy);
		combined_idx = __find_combined_index(page_idx, order);
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);
	list_add(&page->lru, &zone->free_area[order].free_list);
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private |
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_reclaim	|
			1 << PG_slab	|
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);
	if (PageDirty(page))
		__ClearPageDirty(page);
	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not free the page.  But we shall soon need
	 * to do more, for when the ZERO_PAGE count wraps negative.
	 */
	return PageReserved(page);
}

/*
 * Frees a list of pages. 
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pages_bulk(struct zone *zone, int count,
					struct list_head *list, int order)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;
	while (count--) {
		struct page *page;

		BUG_ON(list_empty(list));
		page = list_entry(list->prev, struct page, lru);
		/* have to delete it as __free_one_page list manipulates */
		list_del(&page->lru);
		__free_one_page(page, zone, order);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order)
{
	LIST_HEAD(list);
	list_add(&page->lru, &list);
	free_pages_bulk(zone, 1, &list, order);
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int i;
	int reserved = 0;

	arch_free_page(page, order);
	if (!PageHighMem(page))
		mutex_debug_check_no_locks_freed(page_address(page),
						 PAGE_SIZE<<order);

	for (i = 0 ; i < (1 << order) ; ++i)
		reserved += free_pages_check(page + i);
	if (reserved)
		return;

	kernel_map_pages(page, 1 << order, 0);
	local_irq_save(flags);
	__mod_page_state(pgfree, 1 << order);
	free_one_page(page_zone(page), page, order);
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
 	int low, int high, struct free_area *area)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
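
/*
 * Illustrative walk-through (not from the original source): satisfying an
 * order-0 request from an order-3 block, expand(zone, page, 0, 3, area):
 *
 *	high 3 -> 2: pages [4..7] queued on free_area[2] as an order-2 block
 *	high 2 -> 1: pages [2..3] queued on free_area[1] as an order-1 block
 *	high 1 -> 0: page  [1]    queued on free_area[0] as an order-0 block
 *
 * leaving page[0] for the caller.
 */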

/*
 * This page is about to be returned from the page allocator
 */
static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(page_count(page) != 0)  |
		(page->flags & (
			1 << PG_lru	|
			1 << PG_private	|
			1 << PG_locked	|
			1 << PG_active	|
			1 << PG_dirty	|
			1 << PG_reclaim	|
			1 << PG_slab    |
			1 << PG_swapcache |
			1 << PG_writeback |
			1 << PG_reserved |
			1 << PG_buddy ))))
		bad_page(page);

	/*
	 * For now, we report if PG_reserved was found set, but do not
	 * clear it, and do not allocate the page: as a safety net.
	 */
	if (PageReserved(page))
		return 1;

	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
			1 << PG_referenced | 1 << PG_arch_1 |
			1 << PG_checked | 1 << PG_mappedtodisk);
	set_page_private(page, 0);
	set_page_refcounted(page);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/* 
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order)
{
	struct free_area * area;
	unsigned int current_order;
	struct page *page;

	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = zone->free_area + current_order;
		if (list_empty(&area->free_list))
			continue;

		page = list_entry(area->free_list.next, struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		zone->free_pages -= 1UL << order;
		expand(zone, page, order, current_order, area);
		return page;
	}

	return NULL;
}

/* 
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order, 
			unsigned long count, struct list_head *list)
{
	int i;
	
	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order);
		if (unlikely(page == NULL))
			break;
		list_add_tail(&page->lru, list);
	}
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the slab reaper to drain pagesets on a particular node that
 * belong to the currently executing processor.
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_node_pages(int nodeid)
{
	int i, z;
	unsigned long flags;

	for (z = 0; z < MAX_NR_ZONES; z++) {
		struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, smp_processor_id());
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			if (pcp->count) {
				local_irq_save(flags);
				free_pages_bulk(zone, pcp->count, &pcp->list, 0);
				pcp->count = 0;
				local_irq_restore(flags);
			}
		}
	}
}
#endif

#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
static void __drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;
	int i;

	for_each_zone(zone) {
		struct per_cpu_pageset *pset;

		pset = zone_pcp(zone, cpu);
		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
			struct per_cpu_pages *pcp;

			pcp = &pset->pcp[i];
			local_irq_save(flags);
			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
			pcp->count = 0;
			local_irq_restore(flags);
		}
	}
}
#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM

void mark_free_pages(struct zone *zone)
{
	unsigned long zone_pfn, flags;
	int order;
	struct list_head *curr;

	if (!zone->spanned_pages)
		return;

	spin_lock_irqsave(&zone->lock, flags);
	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));

	for (order = MAX_ORDER - 1; order >= 0; --order)
		list_for_each(curr, &zone->free_area[order].free_list) {
			unsigned long start_pfn, i;

			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));

			for (i=0; i < (1<<order); i++)
				SetPageNosaveFree(pfn_to_page(start_pfn+i));
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);	
	__drain_pages(smp_processor_id());
	local_irq_restore(flags);	
}
#endif /* CONFIG_PM */

static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu)
{
#ifdef CONFIG_NUMA
	pg_data_t *pg = z->zone_pgdat;
	pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
	struct per_cpu_pageset *p;

	p = zone_pcp(z, cpu);
	if (pg == orig) {
		p->numa_hit++;
	} else {
		p->numa_miss++;
		zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
	}
	if (pg == NODE_DATA(numa_node_id()))
		p->local_node++;
	else
		p->other_node++;
#endif
}

/*
 * Free a 0-order page
 */
static void fastcall free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;

	arch_free_page(page, 0);

	if (PageAnon(page))
		page->mapping = NULL;
	if (free_pages_check(page))
		return;

	kernel_map_pages(page, 1, 0);

	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
	local_irq_save(flags);
	__inc_page_state(pgfree);
	list_add(&page->lru, &pcp->list);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
		pcp->count -= pcp->batch;
	}
	local_irq_restore(flags);
	put_cpu();
}
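
/*
 * Sketch of the effect with illustrative (not authoritative) values
 * pcp->high = 96 and pcp->batch = 32: the first 95 order-0 frees on this
 * CPU just push pages onto the per-cpu list; the free that makes
 * pcp->count reach 96 hands 32 pages back to the buddy lists in a single
 * zone->lock acquisition via free_pages_bulk().
 */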

void fastcall free_hot_page(struct page *page)
{
	free_hot_cold_page(page, 0);
}
	
void fastcall free_cold_page(struct page *page)
{
	free_hot_cold_page(page, 1);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	BUG_ON(PageCompound(page));
	BUG_ON(!page_count(page));
	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
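
/*
 * Hypothetical usage sketch (the GFP flags and order are illustrative):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (page) {
 *		split_page(page, 2);
 *		__free_page(page + 3);
 *	}
 *
 * After split_page(), page[0..3] are four independent order-0 pages,
 * each of which must eventually be freed on its own.
 */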

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static struct page *buffered_rmqueue(struct zonelist *zonelist,
			struct zone *zone, int order, gfp_t gfp_flags)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);
	int cpu;

again:
	cpu  = get_cpu();
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;

		pcp = &zone_pcp(zone, cpu)->pcp[cold];
		local_irq_save(flags);
		if (!pcp->count) {
			pcp->count += rmqueue_bulk(zone, 0,
						pcp->batch, &pcp->list);
			if (unlikely(!pcp->count))
				goto failed;
		}
		page = list_entry(pcp->list.next, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} else {
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
	}

	__mod_page_state_zone(zone, pgalloc, 1 << order);
	zone_statistics(zonelist, zone, cpu);
	local_irq_restore(flags);
	put_cpu();

	BUG_ON(bad_range(zone, page));
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	put_cpu();
	return NULL;
}

#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
#define ALLOC_HARDER		0x10 /* try to alloc harder */
#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
#define ALLOC_CPUSET		0x40 /* check for correct cpuset */

/*
 * Return 1 if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	/* free_pages may go negative - that's OK */
	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return 0;
	}
	return 1;
}
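
/*
 * Worked example (numbers are illustrative only): order = 2, mark = 128,
 * lowmem_reserve = 0, no ALLOC_HIGH/ALLOC_HARDER, and a zone with 200
 * free pages of which 40 are order-0 blocks and 20 are order-1 blocks:
 *
 *	start:   free_pages = 200 - (1 << 2) + 1 = 197 > min = 128
 *	order 0: free_pages -= 40 << 0 -> 157, min >>= 1 -> 64, still ok
 *	order 1: free_pages -= 20 << 1 -> 117, min >>= 1 -> 32, still ok
 *
 * so the watermark passes: enough free memory remains in blocks of at
 * least the requested order.
 */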

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist, int alloc_flags)
{
	struct zone **z = zonelist->zones;
	struct page *page = NULL;
	int classzone_idx = zone_idx(*z);

	/*
	 * Go through the zonelist once, looking for a zone with enough free.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	do {
		if ((alloc_flags & ALLOC_CPUSET) &&
				!cpuset_zone_allowed(*z, gfp_mask))
			continue;

		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
			unsigned long mark;
			if (alloc_flags & ALLOC_WMARK_MIN)
				mark = (*z)->pages_min;
			else if (alloc_flags & ALLOC_WMARK_LOW)
				mark = (*z)->pages_low;
			else
				mark = (*z)->pages_high;
			if (!zone_watermark_ok(*z, order, mark,
				    classzone_idx, alloc_flags))
				if (!zone_reclaim_mode ||
				    !zone_reclaim(*z, gfp_mask, order))
					continue;
		}

		page = buffered_rmqueue(zonelist, *z, order, gfp_mask);
		if (page) {
			break;
		}
	} while (*(++z) != NULL);
	return page;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page * fastcall
__alloc_pages(gfp_t gfp_mask, unsigned int order,
		struct zonelist *zonelist)
{
	const gfp_t wait = gfp_mask & __GFP_WAIT;
	struct zone **z;
	struct page *page;
	struct reclaim_state reclaim_state;
	struct task_struct *p = current;
	int do_retry;
	int alloc_flags;
	int did_some_progress;

	might_sleep_if(wait);

restart:
	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */

	if (unlikely(*z == NULL)) {
		/* Should this ever happen?? */
		return NULL;
	}

	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
	if (page)
		goto got_pg;

	do {
		if (cpuset_zone_allowed(*z, gfp_mask))
			wakeup_kswapd(*z, order);
	} while (*(++z));

	/*
	 * OK, we're below the kswapd watermark and have kicked background
	 * reclaim. Now things get more complex, so set up alloc_flags according
	 * to how we want to proceed.
	 *
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags = ALLOC_WMARK_MIN;
	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
		alloc_flags |= ALLOC_HARDER;
	if (gfp_mask & __GFP_HIGH)
		alloc_flags |= ALLOC_HIGH;
	alloc_flags |= ALLOC_CPUSET;

	/*
	 * Go through the zonelist again. Let __GFP_HIGH and allocations
	 * coming from realtime tasks go deeper into reserves.
	 *
	 * This is the last chance, in general, before the goto nopage.
	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
	 */
	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
	if (page)
		goto got_pg;

	/* This allocation should allow future memory freeing. */

	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
			&& !in_interrupt()) {
		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
nofail_alloc:
			/* go through the zonelist yet again, ignoring mins */
			page = get_page_from_freelist(gfp_mask, order,
				zonelist, ALLOC_NO_WATERMARKS);
			if (page)
				goto got_pg;
			if (gfp_mask & __GFP_NOFAIL) {
				blk_congestion_wait(WRITE, HZ/50);
				goto nofail_alloc;
			}
		}
		goto nopage;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait)
		goto nopage;

rebalance:
	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	p->flags |= PF_MEMALLOC;
	reclaim_state.reclaimed_slab = 0;
	p->reclaim_state = &reclaim_state;

	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);

	p->reclaim_state = NULL;
	p->flags &= ~PF_MEMALLOC;

	cond_resched();

	if (likely(did_some_progress)) {
		page = get_page_from_freelist(gfp_mask, order,
						zonelist, alloc_flags);
		if (page)
			goto got_pg;
	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
		/*
		 * Go through the zonelist yet one more time, keep
		 * very high watermark here, this is only to catch
		 * a parallel oom killing, we must fail if we're still
		 * under heavy pressure.
		 */
		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
				zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
		if (page)
			goto got_pg;

		out_of_memory(zonelist, gfp_mask, order);
		goto restart;
	}

	/*
	 * Don't let big-order allocations loop unless the caller explicitly
	 * requests that.  Wait for some write requests to complete then retry.
	 *
	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
	 * <= 3, but that may not be true in other implementations.
	 */
	do_retry = 0;
	if (!(gfp_mask & __GFP_NORETRY)) {
		if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
			do_retry = 1;
		if (gfp_mask & __GFP_NOFAIL)
			do_retry = 1;
	}
	if (do_retry) {
		blk_congestion_wait(WRITE, HZ/50);
		goto rebalance;
	}

nopage:
	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
		printk(KERN_WARNING "%s: page allocation failure."
			" order:%d, mode:0x%x\n",
			p->comm, order, gfp_mask);
		dump_stack();
		show_mem();
	}
got_pg:
	return page;
}

EXPORT_SYMBOL(__alloc_pages);

/*
 * Common helper functions.
 */
fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page * page;
	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}

EXPORT_SYMBOL(__get_free_pages);

fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	struct page * page;

	/*
	 * get_zeroed_page() returns a 32-bit address, which cannot represent
	 * a highmem page
	 */
	BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
	if (page)
		return (unsigned long) page_address(page);
	return 0;
}

EXPORT_SYMBOL(get_zeroed_page);
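
/*
 * Typical call sequence (illustrative only):
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	free_page(addr);
 */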

void __pagevec_free(struct pagevec *pvec)
{
	int i = pagevec_count(pvec);

	while (--i >= 0)
		free_hot_cold_page(pvec->pages[i], pvec->cold);
}

fastcall void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_page(page);
		else
			__free_pages_ok(page, order);
	}
}

EXPORT_SYMBOL(__free_pages);

fastcall void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);

/*
 * Total amount of free (allocatable) RAM:
 */
unsigned int nr_free_pages(void)
{
	unsigned int sum = 0;
	struct zone *zone;

	for_each_zone(zone)
		sum += zone->free_pages;

	return sum;
}

EXPORT_SYMBOL(nr_free_pages);

#ifdef CONFIG_NUMA
unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
{
	unsigned int i, sum = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		sum += pgdat->node_zones[i].free_pages;

	return sum;
}
#endif

static unsigned int nr_free_zone_pages(int offset)
{
	/* Just pick one node, since fallback list is circular */
	pg_data_t *pgdat = NODE_DATA(numa_node_id());
	unsigned int sum = 0;

	struct zonelist *zonelist = pgdat->node_zonelists + offset;
	struct zone **zonep = zonelist->zones;
	struct zone *zone;

	for (zone = *zonep++; zone; zone = *zonep++) {
		unsigned long size = zone->present_pages;
		unsigned long high = zone->pages_high;
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/*
 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
 */
unsigned int nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}

/*
 * Amount of free RAM allocatable within all zones
 */
unsigned int nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
}

#ifdef CONFIG_HIGHMEM
unsigned int nr_free_highpages (void)
{
	pg_data_t *pgdat;
	unsigned int pages = 0;

	for_each_online_pgdat(pgdat)
		pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;

	return pages;
}
#endif

#ifdef CONFIG_NUMA
static void show_node(struct zone *zone)
{
	printk("Node %d ", zone->zone_pgdat->node_id);
}
#else
#define show_node(zone)	do { } while (0)
#endif

/*
 * Accumulate the page_state information across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
static DEFINE_PER_CPU(struct page_state, page_states) = {0};

atomic_t nr_pagecache = ATOMIC_INIT(0);
EXPORT_SYMBOL(nr_pagecache);
#ifdef CONFIG_SMP
DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
#endif

static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
{
	unsigned cpu;

	memset(ret, 0, nr * sizeof(unsigned long));
	cpus_and(*cpumask, *cpumask, cpu_online_map);

	for_each_cpu_mask(cpu, *cpumask) {
		unsigned long *in;
		unsigned long *out;
		unsigned off;
		unsigned next_cpu;

		in = (unsigned long *)&per_cpu(page_states, cpu);

		next_cpu = next_cpu(cpu, *cpumask);
		if (likely(next_cpu < NR_CPUS))
			prefetch(&per_cpu(page_states, next_cpu));

		out = (unsigned long *)ret;
		for (off = 0; off < nr; off++)
			*out++ += *in++;
	}
}

void get_page_state_node(struct page_state *ret, int node)
{
	int nr;
	cpumask_t mask = node_to_cpumask(node);

	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
	nr /= sizeof(unsigned long);

	__get_page_state(ret, nr+1, &mask);
}

void get_page_state(struct page_state *ret)
{
	int nr;
	cpumask_t mask = CPU_MASK_ALL;

	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
	nr /= sizeof(unsigned long);

	__get_page_state(ret, nr + 1, &mask);
}

void get_full_page_state(struct page_state *ret)
{
	cpumask_t mask = CPU_MASK_ALL;

	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
}

unsigned long read_page_state_offset(unsigned long offset)
{
	unsigned long ret = 0;
	int cpu;

	for_each_online_cpu(cpu) {
		unsigned long in;

		in = (unsigned long)&per_cpu(page_states, cpu) + offset;
		ret += *((unsigned long *)in);
	}
	return ret;
}

void __mod_page_state_offset(unsigned long offset, unsigned long delta)
{
	void *ptr;

	ptr = &__get_cpu_var(page_states);
	*(unsigned long *)(ptr + offset) += delta;
}
EXPORT_SYMBOL(__mod_page_state_offset);

void mod_page_state_offset(unsigned long offset, unsigned long delta)
{
	unsigned long flags;
	void *ptr;