/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>

/*
 * The maximum number of pages to writeout in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty limits each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES	1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
	return ratelimit_pages + ratelimit_pages / 2;
}
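
/*
 * Example: with the boot-time default of ratelimit_pages == 32 this
 * evaluates to 48 pages.  Once writeback_set_ratelimit() has scaled
 * ratelimit_pages up on a larger machine (say to 1024), a throttled
 * writer would attempt roughly 1536 pages per pass.  (Illustrative
 * numbers only; the actual value depends on memory size and CPU count.)
 */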

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 5;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 10;

/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


static void background_writeout(unsigned long _min_pages);

/*
 * Scale the writeback cache size in proportion to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a smaller
 * share.
 *
 * We use page writeout completions because we are interested in getting rid of
 * dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these events,
 * because demand can/will vary over time. The length of this period itself is
 * measured in page writeback completions.
 *
 */
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;

/*
 * couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */
static int calc_period_shift(void)
{
	unsigned long dirty_total;

	dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) / 100;
	return 2 + ilog2(dirty_total - 1);
}
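
/*
 * Example: with vm_dirty_ratio == 10 and roughly 1,000,000 dirtyable
 * pages, dirty_total is about 100,000 pages, so the shift becomes
 * 2 + ilog2(99999) = 2 + 16 = 18, i.e. a period of 2^18 writeback
 * completions.  (Illustrative numbers; the real value depends on the
 * machine's memory size and the configured ratio.)
 */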

/*
 * update the period when the dirty ratio changes.
 */
int dirty_ratio_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		int shift = calc_period_shift();
		prop_change_shift(&vm_completions, shift);
		prop_change_shift(&vm_dirties, shift);
	}
	return ret;
}

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
			      bdi->max_prop_frac);
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
	unsigned long flags;

	local_irq_save(flags);
	__bdi_writeout_inc(bdi);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

static inline void task_dirty_inc(struct task_struct *tsk)
{
	prop_inc_single(&vm_dirties, &tsk->dirties);
}

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	if (bdi_cap_writeback_dirty(bdi)) {
		prop_fraction_percpu(&vm_completions, &bdi->completions,
				numerator, denominator);
	} else {
		*numerator = 0;
		*denominator = 1;
	}
}

/*
 * Clip the earned share of dirty pages to that which is actually available.
 * This avoids exceeding the total dirty_limit when the floating averages
 * fluctuate too quickly.
 */
static void
clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
{
	long avail_dirty;

	avail_dirty = dirty -
		(global_page_state(NR_FILE_DIRTY) +
		 global_page_state(NR_WRITEBACK) +
		 global_page_state(NR_UNSTABLE_NFS) +
		 global_page_state(NR_WRITEBACK_TEMP));

	if (avail_dirty < 0)
		avail_dirty = 0;

	avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
		bdi_stat(bdi, BDI_WRITEBACK);

	*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
}

static inline void task_dirties_fraction(struct task_struct *tsk,
		long *numerator, long *denominator)
{
	prop_fraction_single(&vm_dirties, &tsk->dirties,
				numerator, denominator);
}

/*
 * scale the dirty limit
 *
 * task specific dirty limit:
 *
 *   dirty -= (dirty/8) * p_{t}
 */
static void task_dirty_limit(struct task_struct *tsk, long *pdirty)
{
	long numerator, denominator;
	long dirty = *pdirty;
	u64 inv = dirty >> 3;

	task_dirties_fraction(tsk, &numerator, &denominator);
	inv *= numerator;
	do_div(inv, denominator);

	dirty -= inv;
	if (dirty < *pdirty/2)
		dirty = *pdirty/2;

	*pdirty = dirty;
}
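
/*
 * Example: if the limit handed in is 1000 pages and this task has done
 * half of the recent dirtying (p_{t} = 1/2), then inv = (1000/8) * 1/2
 * = 62 pages and the task's limit becomes 938 pages.  The limit never
 * drops below half of the original value (500 pages here).
 * (Illustrative numbers only.)
 */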

/*
 * bdi_lock serialises bdi_set_min_ratio() and bdi_set_max_ratio() below,
 * protecting bdi_min_ratio and each bdi's min_ratio/max_ratio settings.
 */
static DEFINE_SPINLOCK(bdi_lock);
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&bdi_lock, flags);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_irqrestore(&bdi_lock, flags);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	unsigned long flags;
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_irqsave(&bdi_lock, flags);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_irqrestore(&bdi_lock, flags);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);
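
/*
 * Example (hypothetical caller): a driver or filesystem that wants to
 * reserve at least 10% and allow at most 50% of the global dirty
 * threshold for its backing device might do, during setup:
 *
 *	err = bdi_set_min_ratio(bdi, 10);
 *	if (!err)
 *		err = bdi_set_max_ratio(bdi, 50);
 *
 * Either call returns -EINVAL if the requested ratios are inconsistent
 * with each other or with the ratios of other registered devices.
 */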

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around.  To avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_page_state(z, NR_FREE_PAGES)
			+ zone_page_state(z, NR_INACTIVE)
			+ zone_page_state(z, NR_ACTIVE);
	}
	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * determine_dirtyable_memory - amount of memory that may be used
 *
 * Returns the number of pages that can currently be freed and used
 * by the kernel for direct mappings.
 */
unsigned long determine_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES)
		+ global_page_state(NR_INACTIVE)
		+ global_page_state(NR_ACTIVE);

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

void
get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
		 struct backing_dev_info *bdi)
{
	int background_ratio;		/* Percentages */
	int dirty_ratio;
	long background;
	long dirty;
	unsigned long available_memory = determine_dirtyable_memory();
	struct task_struct *tsk;

	dirty_ratio = vm_dirty_ratio;
	if (dirty_ratio < 5)
		dirty_ratio = 5;

	background_ratio = dirty_background_ratio;
	if (background_ratio >= dirty_ratio)
		background_ratio = dirty_ratio / 2;

	background = (background_ratio * available_memory) / 100;
	dirty = (dirty_ratio * available_memory) / 100;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;

	if (bdi) {
		u64 bdi_dirty;
		long numerator, denominator;

		/*
		 * Calculate this BDI's share of the dirty ratio.
		 */
		bdi_writeout_fraction(bdi, &numerator, &denominator);

		bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
		bdi_dirty *= numerator;
		do_div(bdi_dirty, denominator);
		bdi_dirty += (dirty * bdi->min_ratio) / 100;
		if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
			bdi_dirty = dirty * bdi->max_ratio / 100;

		*pbdi_dirty = bdi_dirty;
		clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
		task_dirty_limit(current, pbdi_dirty);
	}
}
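
/*
 * Example: with a global dirty limit of 1000 pages, bdi_min_ratio == 0
 * and a device that has recently done 1/4 of all writeback completions
 * (min_ratio == 0, max_ratio == 100), the code above yields
 * bdi_dirty = 1000 * 1/4 = 250 pages before clipping and the per-task
 * correction.  A device with min_ratio == 10 is guaranteed at least
 * 1000 * 10 / 100 = 100 pages on top of its earned share of the
 * remaining 90%.  (Illustrative numbers only.)
 */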

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
	long nr_reclaimable, bdi_nr_reclaimable;
	long nr_writeback, bdi_nr_writeback;
	long background_thresh;
	long dirty_thresh;
	long bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long write_chunk = sync_writeback_pages();

	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.bdi		= bdi,
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		get_dirty_limits(&background_thresh, &dirty_thresh,
				&bdi_thresh, bdi);

		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_writeback = global_page_state(NR_WRITEBACK);

		bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
		bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		if (nr_reclaimable + nr_writeback <
				(background_thresh + dirty_thresh) / 2)
			break;

		if (!bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 */
		if (bdi_nr_reclaimable) {
			writeback_inodes(&wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			get_dirty_limits(&background_thresh, &dirty_thresh,
				       &bdi_thresh, bdi);
		}

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else if (bdi_nr_reclaimable) {
			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
		}

		if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
			break;
		if (pages_written >= write_chunk)
			break;		/* We've done our duty */

		congestion_wait(WRITE, HZ/10);
	}

	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
			bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;		/* pdflush is already working this queue */

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold.  So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
			(!laptop_mode && (global_page_state(NR_FILE_DIRTY)
					  + global_page_state(NR_UNSTABLE_NFS)
					  > background_thresh)))
		pdflush_operation(background_writeout, 0);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, global_page_state() is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (mapping->backing_dev_info->dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p =  &__get_cpu_var(ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
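
/*
 * Example (hypothetical caller): a filesystem write path that has just
 * dirtied a page typically does
 *
 *	set_page_dirty(page);
 *	balance_dirty_pages_ratelimited(mapping);
 *
 * so that a heavy writer is throttled once the dirty thresholds are
 * exceeded, while light writers only pay for the per-CPU counter update.
 */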

void throttle_vm_writeout(gfp_t gfp_mask)
{
	long background_thresh;
	long dirty_thresh;

	for ( ; ; ) {
		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;      /* wheeee... */

		if (global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK) <= dirty_thresh)
				break;
		congestion_wait(WRITE, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem.  So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * writeback at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
	long min_pages = _min_pages;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = NULL,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.range_cyclic	= 1,
	};

	for ( ; ; ) {
		long background_thresh;
		long dirty_thresh;

		get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
		if (global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) < background_thresh
				&& min_pages <= 0)
			break;
		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		wbc.pages_skipped = 0;
		writeback_inodes(&wbc);
		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
			/* Wrote less than expected */
			if (wbc.encountered_congestion || wbc.more_io)
				congestion_wait(WRITE, HZ/10);
			else
				break;
		}
	}
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
	if (nr_pages == 0)
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	return pdflush_operation(background_writeout, nr_pages);
}

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
	unsigned long oldest_jif;
	unsigned long start_jif;
	unsigned long next_jif;
	long nr_to_write;
	struct writeback_control wbc = {
		.bdi		= NULL,
		.sync_mode	= WB_SYNC_NONE,
		.older_than_this = &oldest_jif,
		.nr_to_write	= 0,
		.nonblocking	= 1,
		.for_kupdate	= 1,
		.range_cyclic	= 1,
	};

	sync_supers();

	oldest_jif = jiffies - dirty_expire_interval;
	start_jif = jiffies;
	next_jif = start_jif + dirty_writeback_interval;
	nr_to_write = global_page_state(NR_FILE_DIRTY) +
			global_page_state(NR_UNSTABLE_NFS) +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
	while (nr_to_write > 0) {
		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0) {
			if (wbc.encountered_congestion || wbc.more_io)
				congestion_wait(WRITE, HZ/10);
			else
				break;	/* All the old data is written */
		}
		nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
	}
	if (time_before(next_jif, jiffies + HZ))
		next_jif = jiffies + HZ;
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, next_jif);
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
	if (dirty_writeback_interval)
		mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	else
		del_timer(&wb_timer);
	return 0;
}

static void wb_timer_fn(unsigned long unused)
{
	if (pdflush_operation(wb_kupdate, 0) < 0)
		mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
	sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
	pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
	mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * global_page_state() too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high.  Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

void writeback_set_ratelimit(void)
{
	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
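
/*
 * Example: on a machine with 1 GiB of 4 KiB pages (vm_total_pages ==
 * 262144) and 4 online CPUs, this computes 262144 / 128 = 2048 pages,
 * which the 4 MiB cap then reduces to 1024 pages dirtied per CPU
 * between checks.  (Illustrative numbers only.)
 */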

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has.
 */
void __init page_writeback_init(void)
{
	int shift;

	mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	int scanned = 0;
	int range_whole = 0;
	long nr_to_write = wbc->nr_to_write;

	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
		unsigned i;

		scanned = 1;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */
			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				continue;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				continue;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				continue;
			}

			ret = (*writepage)(page, wbc, data);

			if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
				unlock_page(page);
				ret = 0;
			}
			if (ret || (--nr_to_write <= 0))
				done = 1;
			if (wbc->nonblocking && bdi_write_congested(bdi)) {
				wbc->encountered_congestion = 1;
				done = 1;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (!wbc->no_nrwrite_index_update) {
		if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
			mapping->writeback_index = index;
		wbc->nr_to_write = nr_to_write;
	}

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	return write_cache_pages(mapping, wbc, __writepage, mapping);
}

EXPORT_SYMBOL(generic_writepages);
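
/*
 * Example (hypothetical filesystem): a filesystem whose ->writepage does
 * all the real work can simply point its writepages method at
 * generic_writepages:
 *
 *	static const struct address_space_operations foofs_aops = {
 *		.writepage	= foofs_writepage,
 *		.writepages	= generic_writepages,
 *	};
 *
 * Filesystems that need more control call write_cache_pages() directly
 * with their own writepage callback, as __writepage() above shows.
 */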

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	wbc->for_writepages = 1;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	wbc->for_writepages = 0;
	return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);
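
/*
 * Example (hypothetical caller): to synchronously flush a single page,
 * e.g. a piece of filesystem metadata, a caller can do
 *
 *	lock_page(page);
 *	err = write_one_page(page, 1);
 *
 * write_one_page() unlocks the page and returns a negative error code
 * (for example -EIO) if the writeout failed.
 */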

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			if (mapping_cap_account_dirty(mapping)) {
				__inc_zone_page_state(page, NR_FILE_DIRTY);
				__inc_bdi_stat(mapping->backing_dev_info,
						BDI_RECLAIMABLE);
				task_io_account_write(PAGE_CACHE_SIZE);
			}
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
static int __set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}

int set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty(page);
	if (ret)
		task_dirty_inc(current);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page_nosync(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));

	ClearPageReclaim(page);
	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point. We do this by having them hold the
		 * page lock at some point after installing their
		 * pte, but before marking the page dirty.
		 * Pages are always locked coming in here, so we get
		 * the desired exclusion. See mm/memory.c:do_wp_page()
		 * for more comments.
		 */
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				__dec_bdi_stat(bdi, BDI_WRITEBACK);
				__bdi_writeout_inc(bdi);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret)
		dec_zone_page_state(page, NR_WRITEBACK);
	return ret;
}

int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		inc_zone_page_state(page, NR_WRITEBACK);
	return ret;

}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	int ret;
	rcu_read_lock();
	ret = radix_tree_tagged(&mapping->page_tree, tag);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(mapping_tagged);