#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>

struct mempolicy;
struct anon_vma;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

extern unsigned long num_physpages;
extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
#define VM_GROWSUP	0x00000200
#else
#define VM_GROWSUP	0x00000000
#endif
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x00001000
#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_RESERVED	0x00080000	/* Count as reserved_vm like IO */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
#define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
#define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
#define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */

#define VM_CAN_NONLINEAR 0x08000000	/* Has ->fault & does nonlinear pages */
#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_SAO		0x20000000	/* Strong Access Ordering (powerpc) */
#define VM_PFN_AT_MMAP	0x40000000	/* PFNMAP vma that is fully mapped at mmap time */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

/*
 * special vmas that are non-mergeable, non-mlock()able
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */

/*
 * This interface is used by x86 PAT code to identify a pfn mapping that is
 * linear over entire vma. This is to optimize PAT code that deals with
 * marking the physical region with a particular prot. This is not for generic
 * mm use. Note also that this check will not work if the pfn mapping is
 * linear for a vma starting at physical address 0. In which case PAT code
 * falls back to slow path of reserving physical range page by page.
 */
static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_PFN_AT_MMAP);
}

static inline int is_pfn_mapping(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_PFNMAP);
}

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
 * mapping support.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs. 
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		const nodemask_t *to, unsigned long flags);
#endif
};
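
/*
 * Example (illustrative sketch, not part of this header): a minimal set
 * of vm_operations for a driver that services faults from pages it
 * already holds.  "struct my_dev", "my_pages" and "my_npages" are
 * hypothetical driver state stashed in vma->vm_private_data:
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct my_dev *dev = vma->vm_private_data;
 *
 *		if (vmf->pgoff >= dev->my_npages)
 *			return VM_FAULT_SIGBUS;
 *		vmf->page = dev->my_pages[vmf->pgoff];
 *		get_page(vmf->page);	(the core expects a referenced page)
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault	= my_fault,
 *	};
 */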

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON(atomic_read(&page->_count) == 0);
	return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}
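
/*
 * Example (sketch): the lockless-lookup pattern these helpers support -
 * take a speculative reference, then recheck; roughly what
 * find_get_page()/page_cache_get_speculative() do in mm/filemap.c:
 *
 *	rcu_read_lock();
 *	page = radix_tree_lookup(&mapping->page_tree, index);
 *	if (page && !get_page_unless_zero(page))
 *		page = NULL;	(the page was being freed concurrently)
 *	rcu_read_unlock();
 *	...
 *	if (page)
 *		put_page(page);	(put_page() uses put_page_testzero()
 *				 internally to free on the final reference)
 */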

extern int page_is_ram(unsigned long pfn);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif
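
/*
 * Example (sketch): translating a vmalloc()ed buffer into its backing
 * pages, e.g. to build a scatterlist.  "buf" and "len" are hypothetical:
 *
 *	if (is_vmalloc_addr(buf)) {
 *		unsigned long off;
 *
 *		for (off = 0; off < len; off += PAGE_SIZE) {
 *			struct page *pg = vmalloc_to_page(buf + off);
 *			...add pg to the scatterlist...
 *		}
 *	}
 */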

static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}

static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}

static inline unsigned long compound_lock_irqsave(struct page *page)
{
	unsigned long uninitialized_var(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	local_irq_save(flags);
	compound_lock(page);
#endif
	return flags;
}

static inline void compound_unlock_irqrestore(struct page *page,
					      unsigned long flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	compound_unlock(page);
	local_irq_restore(flags);
#endif
}

static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page)))
		return page->first_page;
	return page;
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline void get_page(struct page *page)
{
	/*
	 * Getting a normal page or the head of a compound page
	 * requires to already have an elevated page->_count. Only if
	 * we're getting a tail page, the elevated page->_count is
	 * required only in the head page, so for tail pages the
	 * bugcheck only verifies that the page->_count isn't
	 * negative.
	 */
	VM_BUG_ON(atomic_read(&page->_count) < !PageTail(page));
	atomic_inc(&page->_count);
	/*
	 * Getting a tail page will elevate both the head and tail
	 * page->_count(s).
	 */
	if (unlikely(PageTail(page))) {
		/*
		 * This is safe only because
		 * __split_huge_page_refcount can't run under
		 * get_page().
		 */
		VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
		atomic_inc(&page->first_page->_count);
	}
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

/*
 * Set up the page count before the page is freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a PG_compound page.
 */
typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return (unsigned long)page[1].lru.prev;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}
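
/*
 * Example (sketch): roughly how a compound page is prepared - install a
 * destructor and record the order in the first tail page, in the style
 * of prep_compound_page() and mm/hugetlb.c:
 *
 *	set_compound_page_dtor(page, free_huge_page);
 *	set_compound_order(page, order);
 *	...
 *	order = compound_order(page);	(reads back 0 for ordinary pages)
 */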

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_CACHE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/*
 * page->flags layout:
 *
 * There are three possibilities for how page->flags get
 * laid out.  The first is for the normal case, without
 * sparsemem.  The second is for sparsemem when there is
 * plenty of space for node and section.  The last is when
 * we have run out of space and have to fall back to an
 * alternate (slower) way of determining the node.
 *
 * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE | ... | FLAGS |
 * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
 * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
 */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTIONS_WIDTH		SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH		0
#endif

#define ZONES_WIDTH		ZONES_SHIFT

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
#define NODES_WIDTH		NODES_SHIFT
#else
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#error "Vmemmap: No space for nodes field in page flags"
#endif
#define NODES_WIDTH		0
#endif

/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)

/*
 * We are going to use the flags for the page to node mapping if it's in
 * there.  This includes the case where there is no node, so it is implicit.
 */
#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
#define NODE_NOT_IN_PAGE_FLAGS
#endif

#ifndef PFN_SECTION_SHIFT
#define PFN_SECTION_SHIFT 0
#endif

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

/*
 * The identification function is only used by the buddy allocator for
 * determining if two pages could be buddies. We are not really
 * identifying a zone since we could be using the section number
 * id if no node id is available in page flags.
 * We guarantee only that it will return the same value for two
 * combinable pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(struct page *page);
#else
static inline int page_to_nid(struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

static inline struct zone *page_zone(struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
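
/*
 * Example (sketch): the typical use of the lookups above - find the
 * node and zone backing a page so per-zone state can be updated, much
 * as the page allocator's free path does:
 *
 *	struct zone *zone = page_zone(page);
 *	int nid = page_to_nid(page);
 *
 *	spin_lock(&zone->lock);
 *	...update zone state...
 *	spin_unlock(&zone->lock);
 */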

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
static inline unsigned long page_to_section(struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
	set_page_section(page, pfn_to_section_nr(pfn));
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	VM_BUG_ON(PageSlab(page));
	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		mapping = NULL;
	return mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
static inline void *page_rmapping(struct page *page)
{
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

/*
 * The atomic page->_mapcount, like _count, starts from -1:
 * so that transitions both from it and to it can be tracked,
 * using atomic_inc_and_test and atomic_add_negative(-1).
 */
static inline void reset_page_mapcount(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}
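
/*
 * Example (sketch): telling the major page types apart with the helpers
 * above, in the style of rmap code:
 *
 *	if (page_mapped(page)) {	(in at least one set of page tables)
 *		if (PageAnon(page))
 *			...page->mapping is really an anon_vma pointer...
 *		else
 *			mapping = page_mapping(page);	(inode address_space,
 *							 or swapper_space)
 *	}
 */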

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
			 VM_FAULT_HWPOISON_LARGE)

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
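
/*
 * Example (sketch): how a fault handler reports a poisoned huge page,
 * roughly as hugetlbfs does - the hstate index rides along with the
 * error bit so the caller can recover the page size:
 *
 *	return VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h));
 *
 * and the receiving side can later extract it:
 *
 *	idx = VM_FAULT_GET_HINDEX(ret);
 */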

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

extern void show_free_areas(void);

int shmem_lock(struct file *file, int lock, struct user_struct *user);
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
int shmem_zero_setup(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern unsigned long shmem_get_unmapped_area(struct file *file,
					     unsigned long addr,
					     unsigned long len,
					     unsigned long pgoff,
					     unsigned long flags);
#endif

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
	unsigned long truncate_count;		/* Compare vm_truncate_count */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb,
		struct vm_area_struct *start_vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *);

/**
 * mm_walk - callbacks for walk_page_range
 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 *
 * (see walk_page_range for more details)
 */
struct mm_walk {
	int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
	int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
	int (*hugetlb_entry)(pte_t *, unsigned long,
			     unsigned long, unsigned long, struct mm_walk *);
	struct mm_struct *mm;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
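
/*
 * Example (sketch): counting present ptes in a range with the walker.
 * "nr_present" is hypothetical caller state passed via walk->private;
 * mmap_sem must be held across the walk:
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long end, struct mm_walk *walk)
 *	{
 *		unsigned long *nr_present = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*nr_present)++;
 *		return 0;
 *	}
 *
 *	struct mm_walk walk = {
 *		.pte_entry	= count_pte,
 *		.mm		= mm,
 *		.private	= &nr_present,
 *	};
 *
 *	down_read(&mm->mmap_sem);
 *	walk_page_range(start, end, &walk);
 *	up_read(&mm->mmap_sem);
 */
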
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
extern int vmtruncate(struct inode *inode, loff_t offset);
extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);

int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);

int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
#endif

extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			unsigned long start, int nr_pages, int write, int force,
			struct page **pages, struct vm_area_struct **vmas);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages);
struct page *get_dump_page(unsigned long addr);

extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned long offset);

int __set_page_dirty_nobuffers(struct page *page);
int __set_page_dirty_no_writeback(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
void account_page_dirtied(struct page *page, struct address_space *mapping);
void account_page_writeback(struct page *page);
int set_page_dirty(struct page *page);
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

/* Is the vma a continuation of the stack vma above it? */
static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
{
	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
}

extern unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len);
extern unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr);
extern int mprotect_fixup(struct vm_area_struct *vma,
			  struct vm_area_struct **pprev, unsigned long start,
			  unsigned long end, unsigned long newflags);

/*
 * doesn't attempt to fault and will return short.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages);
/*
 * per-process(per-mm_struct) statistics.
 */
#if defined(SPLIT_RSS_COUNTING)
/*
 * The mm counters are not protected by its page_table_lock,
 * so must be incremented atomically.
 */
static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_set(&mm->rss_stat.count[member], value);
}

unsigned long get_mm_counter(struct mm_struct *mm, int member);

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	atomic_long_add(value, &mm->rss_stat.count[member]);
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_inc(&mm->rss_stat.count[member]);
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	atomic_long_dec(&mm->rss_stat.count[member]);
}

#else  /* !SPLIT_RSS_COUNTING */
/*
 * The mm counters are protected by its page_table_lock,
 * so can be incremented directly.
 */
static inline void set_mm_counter(struct mm_struct *mm, int member, long value)
{
	mm->rss_stat.count[member] = value;
}

static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
{
	return mm->rss_stat.count[member];
}

static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
	mm->rss_stat.count[member] += value;
}

static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
	mm->rss_stat.count[member]++;
}

static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
	mm->rss_stat.count[member]--;
}

#endif /* !SPLIT_RSS_COUNTING */

static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
	return get_mm_counter(mm, MM_FILEPAGES) +
		get_mm_counter(mm, MM_ANONPAGES);
}

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

static inline void update_hiwater_rss(struct mm_struct *mm)
{
	unsigned long _rss = get_mm_rss(mm);

	if ((mm)->hiwater_rss < _rss)
		(mm)->hiwater_rss = _rss;
}

static inline void update_hiwater_vm(struct mm_struct *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
					 struct mm_struct *mm)
{
	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);

	if (*maxrss < hiwater_rss)
		*maxrss = hiwater_rss;
}

#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct task_struct *task, struct mm_struct *mm);
#else
static inline void sync_mm_rss(struct task_struct *task, struct mm_struct *mm)
{
}
#endif

/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'.  It should
 * look through the least-recently-used 'nr_to_scan' entries and
 * attempt to free them up.  It should return the number of objects
 * which remain in the cache.  If it returns -1, it means it cannot do
 * any scanning at this time (e.g. there is a risk of deadlock).
 *
 * The 'gfpmask' refers to the allocation we are currently trying to
 * fulfil.
 *
 * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
 * querying the cache size, so a fastpath for that case is appropriate.
 */
struct shrinker {
	int (*shrink)(struct shrinker *, int nr_to_scan, gfp_t gfp_mask);
	int seeks;	/* seeks to recreate an obj */

	/* These are for internal use */
	struct list_head list;
	long nr;	/* objs pending delete */
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
extern void register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
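
/*
 * Example (sketch): a minimal shrinker for a hypothetical object cache;
 * "my_cache_count" and "my_cache_evict" are assumed helpers:
 *
 *	static int my_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask)
 *	{
 *		if (nr_to_scan)
 *			my_cache_evict(nr_to_scan);
 *		return my_cache_count();	(objects left in the cache)
 *	}
 *
 *	static struct shrinker my_shrinker = {
 *		.shrink	= my_shrink,
 *		.seeks	= DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&my_shrinker);	(at init)
 *	unregister_shrinker(&my_shrinker);	(at teardown)
 */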

int vma_wants_writenotify(struct vm_area_struct *vma);

extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
			       spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
				    spinlock_t **ptl)
{
	pte_t *ptep;
	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
	return ptep;
}

#ifdef __PAGETABLE_PUD_FOLDED
static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
						unsigned long address)
{
	return 0;
}
#else
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
#endif

#ifdef __PAGETABLE_PMD_FOLDED
static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
						unsigned long address)
{
	return 0;
}
#else
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
#endif

int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);