/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

static void clear_exceptional_entry(struct address_space *mapping,
				    pgoff_t index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;
	radix_tree_replace_slot(slot, NULL);
	mapping->nrshadows--;
	if (!node)
		goto unlock;
	workingset_node_shadows_dec(node);
	/*
	 * Don't track node without shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already untracked.
	 * The list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_shadows(node) &&
	    !list_empty(&node->private_list))
		list_lru_del(&workingset_shadow_nodes, &node->private_list);
	__radix_tree_delete_node(&mapping->page_tree, node);
unlock:
	spin_unlock_irq(&mapping->tree_lock);
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
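
/*
 * A hedged sketch, not part of this file: a buffer_head-based
 * filesystem can leave ->invalidatepage NULL and rely on the
 * block_invalidatepage() fallback above, or wire it up explicitly
 * in its address_space_operations ("myfs" names are hypothetical):
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */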

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
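
/*
 * A hedged usage sketch: filesystems typically opt in to memory
 * failure handling by pointing ->error_remove_page at the generic
 * helper ("myfs" is a hypothetical name):
 *
 *	static const struct address_space_operations myfs_aops = {
 *		...
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */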

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is
 * not properly page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0 && mapping->nrshadows == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
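
/*
 * A hedged usage sketch: offsets are in bytes and @lend is inclusive,
 * so removing len bytes of cache starting at byte offset off would be
 * (values illustrative; most filesystems go through the
 * truncate_pagecache_range() wrapper further down instead):
 *
 *	truncate_inode_pages_range(inode->i_mapping, off, off + len - 1);
 */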

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrshadows;
	unsigned long nrpages;

	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrshadows first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrshadows = mapping->nrshadows;

	if (nrpages || nrshadows) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		spin_lock_irq(&mapping->tree_lock);
		spin_unlock_irq(&mapping->tree_lock);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);
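
/*
 * A hedged sketch of the intended call site ("myfs" is hypothetical):
 * a minimal ->evict_inode for a filesystem with no blocks to free:
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *	}
 */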

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so we try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
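
/*
 * A hedged usage sketch, in the style of POSIX_FADV_DONTNEED
 * handling: after writing a byte range back, drop whatever clean,
 * unmapped pages cover it (offset and len are illustrative):
 *
 *	pgoff_t start = offset >> PAGE_CACHE_SHIFT;
 *	pgoff_t end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
 *
 *	invalidate_mapping_pages(mapping, start, end);
 */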

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
507 508
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
509 510 511 512
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	memcg = mem_cgroup_begin_page_stat(page);
	spin_lock_irqsave(&mapping->tree_lock, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL, memcg);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	mem_cgroup_end_page_stat(memcg);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	mem_cgroup_end_page_stat(memcg);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
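
/*
 * A hedged usage sketch, modelled on direct-IO writers: invalidate
 * the cached pages covering a direct write so that later buffered
 * reads cannot see stale data (pos and count are illustrative):
 *
 *	err = invalidate_inode_pages2_range(mapping,
 *			pos >> PAGE_CACHE_SHIFT,
 *			(pos + count - 1) >> PAGE_CACHE_SHIFT);
 */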

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
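
/*
 * A hedged sketch of the ordering documented above ("myfs" helper is
 * hypothetical): write the new i_size first, then truncate the
 * pagecache, then free the filesystem's blocks:
 *
 *	i_size_write(inode, newsize);
 *	truncate_pagecache(inode, newsize);
 *	myfs_truncate_blocks(inode, newsize);
 */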

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
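
/*
 * A hedged sketch of a typical caller ("myfs" helper is hypothetical):
 * a ->setattr implementation handling ATTR_SIZE:
 *
 *	if ((attr->ia_valid & ATTR_SIZE) &&
 *	    attr->ia_size != i_size_read(inode)) {
 *		truncate_setsize(inode, attr->ia_size);
 *		myfs_truncate_blocks(inode, attr->ia_size);
 *	}
 */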

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or
 * by a write starting after current i_size. We mark the page straddling
 * current i_size RO so that page_mkwrite() is called on the first write
 * access to the page.  This way the filesystem can be sure that
 * page_mkwrite() is called on the page before a user writes to the page
 * via mmap after the i_size has been changed.
 *
 * The function must be called after i_size is updated so that a page
 * fault coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe the
 * new i_size value before we are prepared to store mmap writes at the new
 * inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = 1 << inode->i_blkbits;
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_CACHE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
		return;

	index = from >> PAGE_CACHE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
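
/*
 * A hedged usage sketch: a caller that extends i_size by hand rather
 * than through truncate_setsize() pairs the size update with this
 * helper, in that order (old_size and new_size are illustrative):
 *
 *	i_size_write(inode, new_size);
 *	pagecache_isize_extended(inode, old_size, new_size);
 */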

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
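
/*
 * A hedged usage sketch, in the style of FALLOC_FL_PUNCH_HOLE
 * implementations ("myfs" helper is hypothetical): drop the cache
 * over the hole first, then free the underlying blocks:
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *	myfs_free_blocks_range(inode, offset, len);
 */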