/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
  /*
   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
   * ranges of memory (RAM) that may be registered with add_active_range().
   * Ranges passed to add_active_range() will be merged if possible
   * so the number of times add_active_range() can be called is
   * related to the number of nodes and the number of holes
   */
  #ifdef CONFIG_MAX_ACTIVE_REGIONS
    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
  #else
    #if MAX_NUMNODES >= 32
      /* If there can be many nodes, allow up to 50 holes per node */
      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
    #else
      /* By default, allow up to 256 distinct regions */
      #define MAX_ACTIVE_REGIONS 256
    #endif
  #endif

  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
  static int __meminitdata nr_nodemap_entries;
  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  static unsigned long __initdata required_kernelcore;
  static unsigned long __initdata required_movablecore;
  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  int movable_zone;
  EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{

	if (unlikely(page_group_by_mobility_disabled))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);

	do {
		seq = zone_span_seqbegin(zone);
		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
			ret = 1;
		else if (pfn < zone->zone_start_pfn)
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		reset_page_mapcount(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	reset_page_mapcount(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All pages have their ->private pointing at
 * the head page (even the head page has this).
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		__SetPageTail(p);
		p->first_page = page;
	}
}

/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order) ||
	    unlikely(!PageHead(page))) {
		bad_page(page);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p) || (p->first_page != page))) {
			bad_page(page);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

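/*
 * Buddy bookkeeping helpers: a page sitting on a buddy free list carries its
 * order in page_private() and is tagged PG_buddy for as long as it is there.
 */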
static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1 << order);
}

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount -2.
 * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON(page_count(buddy) != 0);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with _mapcount -2. Page's
 * order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were   
 * free, the remainder of the region must be split into blocks.   
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.            
 *
 * -- wli
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON(page_idx & ((1 << order) - 1));
	VM_BUG_ON(bad_range(zone, page));

	while (order < MAX_ORDER-1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;

		/* Our buddy is free, merge with it and move up one order. */
		list_del(&buddy->lru);
		zone->free_area[order].nr_free--;
		rmv_page_order(buddy);
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case,
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

/*
 * free_page_mlock() -- clean up attempts to free and mlocked() page.
 * Page should not be on lru, so no need to fix that up.
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
	__dec_zone_page_state(page, NR_MLOCK);
	__count_vm_event(UNEVICTABLE_MLOCKFREED);
}

static inline int free_pages_check(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(atomic_read(&page->_count) != 0) |
		(page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
		(mem_cgroup_bad_page_check(page)))) {
		bad_page(page);
		return 1;
	}
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;

	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, zone, 0, page_private(page));
			trace_mm_page_pcpu_drain(page, 0, page_private(page));
		} while (--to_free && --batch_free && !list_empty(list));
	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
	spin_unlock(&zone->lock);
}

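/*
 * Free a single (possibly high-order) page straight back to the buddy lists
 * under zone->lock, bypassing the per-cpu page lists.
 */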
static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	zone->all_unreclaimable = 0;
	zone->pages_scanned = 0;

	__free_one_page(page, zone, order, migratetype);
	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
	spin_unlock(&zone->lock);
}

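/*
 * Checks and debug hooks shared by all freeing paths. Returns false if any
 * constituent page fails free_pages_check() and the block must not be freed.
 */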
static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	trace_mm_page_free_direct(page, order);
	kmemcheck_free_shadow(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	for (i = 0; i < (1 << order); i++)
		bad += free_pages_check(page + i);
	if (bad)
		return false;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

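/*
 * Free a block directly to the buddy allocator with interrupts disabled,
 * updating the PGFREE event count and mlock accounting on the way.
 */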
static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int wasMlocked = __TestClearPageMlocked(page);

	if (!free_pages_prepare(page, order))
		return;

	local_irq_save(flags);
	if (unlikely(wasMlocked))
		free_page_mlock(page);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, order,
					get_pageblock_migratetype(page));
	local_irq_restore(flags);
}

/*
 * permit the bootmem allocator to evade page validation on high-order frees
 */
void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
	if (order == 0) {
		__ClearPageReserved(page);
		set_page_count(page, 0);
		set_page_refcounted(page);
		__free_page(page);
	} else {
		int loop;

		prefetchw(page);
		for (loop = 0; loop < BITS_PER_LONG; loop++) {
			struct page *p = &page[loop];

			if (loop + 1 < BITS_PER_LONG)
				prefetchw(p + 1);
			__ClearPageReserved(p);
			set_page_count(p, 0);
		}

		set_page_refcounted(page);
		__free_pages(page, order);
	}
}


/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON(bad_range(zone, &page[size]));
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (unlikely(page_mapcount(page) |
		(page->mapping != NULL)  |
		(atomic_read(&page->_count) != 0)  |
		(page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
		(mem_cgroup_bad_page_check(page)))) {
		bad_page(page);
		return 1;
	}
	return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (unlikely(check_new_page(p)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area * area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
};

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

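/*
 * Round the given page down to the start of its pageblock and move the whole
 * block, giving up if the block would straddle a zone boundary.
 */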
static int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (start_pfn < zone->zone_start_pfn)
		start_page = page;
	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

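/*
 * Set the migratetype on every pageblock spanned by a page whose order is
 * at least pageblock_order.
 */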
static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
	struct free_area * area;
	int current_order;
	struct page *page;
	int migratetype, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0; i < MIGRATE_TYPES - 1; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				continue;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			/*
			 * If breaking a large block of pages, move all free
			 * pages to the preferred allocation list. If falling
			 * back for a reclaimable kernel allocation, be more
			 * aggressive about taking ownership of free pages
			 */
			if (unlikely(current_order >= (pageblock_order >> 1)) ||
					start_migratetype == MIGRATE_RECLAIMABLE ||
					page_group_by_mobility_disabled) {
				unsigned long pages;
				pages = move_freepages_block(zone, page,
								start_migratetype);

				/* Claim the whole block if over half of it is free */
				if (pages >= (1 << (pageblock_order-1)) ||
						page_group_by_mobility_disabled)
					set_pageblock_migratetype(page,
								start_migratetype);

				migratetype = start_migratetype;
			}

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);

			/* Take ownership for orders >= pageblock_order */
			if (current_order >= pageblock_order)
				change_pageblock_range(page, current_order,
							start_migratetype);

			expand(zone, page, order, current_order, area, migratetype);

			trace_mm_page_alloc_extfrag(page, order, current_order,
				start_migratetype, migratetype);

			return page;
		}
	}

	return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

retry_reserve:
	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
		page = __rmqueue_fallback(zone, order, migratetype);

		/*
		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
		 * is used because __rmqueue_smallest is an inline function
		 * and we want just one call site
		 */
		if (!page) {
			migratetype = MIGRATE_RESERVE;
			goto retry_reserve;
		}
	}

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}

/* 
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order, 
			unsigned long count, struct list_head *list,
			int migratetype, int cold)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;