/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/aio.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

#ifdef CONFIG_TMPFS
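/*
 * Default limits when "size=" and "nr_inodes=" are not given as mount
 * options: let tmpfs grow to at most half of RAM, and cap the inode count
 * by the smaller of lowmem and half of RAM.
 */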
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

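/*
 * Forward declarations: when a page read back from swap was allocated with
 * constraints unsuitable for this mapping's gfp mask, it is copied into a
 * freshly allocated replacement before being added to the page cache.
 */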
static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
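/*
 * shmem_getpage_gfp() is the workhorse that finds or allocates the page at
 * @index; the shmem_getpage() wrapper below simply supplies the mapping's
 * default gfp mask.
 */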
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

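/*
 * Re-account a pre-accounted (non-VM_NORESERVE) object when its size
 * changes, charging or uncharging only the difference between old and new.
 */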
static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

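/*
 * Reserve or release one inode against the per-sb free_inodes count, which
 * only applies when the filesystem was mounted with a finite nr_inodes.
 */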
static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (!pslot)
		return -ENOENT;
	item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	radix_tree_replace_slot(pslot, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (!expected)
		error = radix_tree_insert(&mapping->page_tree, index, page);
	else
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	if (!error) {
		mapping->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		__inc_zone_page_state(page, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_cache_release(page);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	spin_lock_irq(&mapping->tree_lock);
	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
	spin_unlock_irq(&mapping->tree_lock);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				} else {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

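/*
 * shmem_setattr - apply attribute changes; size changes honour the
 * F_SEAL_SHRINK/F_SEAL_GROW seals, re-account the object, and on shrink
 * unmap and truncate the range beyond the new size.
 */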
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;

		if (newsize != oldsize) {
			error = shmem_reacct_size(SHMEM_I(inode)->flags,
					oldsize, newsize);
			if (error)
				return error;
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	return error;
}

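/*
 * shmem_evict_inode - final teardown: drop all pages and swap, unhook the
 * inode from the swaplist, and free its xattrs and inode reservation.
 */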
static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return -EAGAIN;	/* tell shmem_unuse we found nothing */

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation, but the inode might have been freed while we
		 * dropped it: although a racing shmem_evict_inode() cannot
		 * complete without emptying the radix_tree, our page lock
		 * on this swapcache page is not enough to prevent that -
		 * free_swap_and_cache() of our swap entry will only
		 * trylock_page(), removing swap from radix_tree whatever.
		 *
		 * We must not proceed to shmem_add_to_page_cache() if the
		 * inode has been freed, but of course we cannot rely on
		 * inode or mapping or info to check that.  However, we can
		 * safely check if our swap entry is still in use (and here
		 * it can't have got reused for another page): if it's still
		 * in use, then the inode cannot have been freed yet, and we
		 * can safely proceed (if it's no longer in use, that tells
		 * nothing about the inode, but we don't need to unuse swap).
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						radswap);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	struct mem_cgroup *memcg;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: caller will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */
	error = -EAGAIN;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			error = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (error != -EAGAIN)
			break;
		/* found nothing in this: move on to search the next */
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (error) {
		if (error != -ENOMEM)
			error = 0;
		mem_cgroup_cancel_charge(page, memcg);
	} else
		mem_cgroup_commit_charge(page, memcg, true);
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * Our capabilities prevent regular writeback or sync from ever calling
	 * shmem_writepage; but a stacking filesystem might use ->writepage of
	 * its underlying filesystem, in which case tmpfs should write out to
	 * swap only in response to memory pressure, and not for the writeback
	 * threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * reactivate the page, and let shmem_fallocate() quit when too many.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    !shmem_falloc->waitq &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

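/* Take a reference on the superblock's default mempolicy (mpol= mount option). */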
static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

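/* Swap in the page at @index, applying the inode's NUMA policy via a pseudo-vma. */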
static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = swapin_readahead(swap, gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}

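/* Allocate a fresh page at @index, honouring the inode's shared NUMA policy. */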
static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = alloc_page_vma(gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif
