quota.c 38.5 KB
Newer Older
David Teigland's avatar
David Teigland committed
1
2
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
David Teigland's avatar
David Teigland committed
4
5
6
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
7
 * of the GNU General Public License version 2.
David Teigland's avatar
David Teigland committed
8
9
10
11
12
13
14
15
16
17
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
18
 * Since quota tags are part of transactions, there is no need for a quota check
David Teigland's avatar
David Teigland committed
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
 * program to be run on node crashes or anything like that.
 *
 * There are couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale" controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit.  (In
 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
 * number greater than one makes quota syncs more frequent and reduces the
 * maximum overrun.  Numbers less than one (but greater than zero) make quota
 * syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
41
#include <linux/mm.h>
David Teigland's avatar
David Teigland committed
42
43
44
45
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
46
#include <linux/fs.h>
47
#include <linux/bio.h>
48
#include <linux/gfs2_ondisk.h>
49
50
#include <linux/kthread.h>
#include <linux/freezer.h>
51
#include <linux/quota.h>
52
#include <linux/dqblk_xfs.h>
David Teigland's avatar
David Teigland committed
53
54

#include "gfs2.h"
55
#include "incore.h"
David Teigland's avatar
David Teigland committed
56
57
58
59
60
61
62
63
64
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
65
#include "inode.h"
66
#include "util.h"
David Teigland's avatar
David Teigland committed
67
68
69
70

/* Tags used to select user vs. group quota in qd_get()/qdsb_get() */
#define QUOTA_USER 1
#define QUOTA_GROUP 0

/* In-core copy of one record from the per-node quota-change file */
struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;    /* uid or gid this change applies to */
};

/* Global LRU of quota_data structs whose refcount has dropped to zero;
 * gfs2_shrink_qd_memory() frees from here under memory pressure. */
static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
/* Protects qd_lru_list/qd_lru_count, the per-sb sd_quota_list, the
 * slot bitmap and qd->qd_change. */
static DEFINE_SPINLOCK(qd_lru_lock);
80

81
/*
 * gfs2_shrink_qd_memory - shrinker callback freeing unreferenced quota data
 * @shrink: the registered shrinker (unused)
 * @sc: shrink request (scan count and allocation context)
 *
 * Frees up to sc->nr_to_scan quota_data structs from the global LRU.
 * Returns the remaining LRU population scaled by vfs_cache_pressure,
 * or -1 if the caller's allocation context forbids FS recursion.
 */
int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;
	int nr_to_scan = sc->nr_to_scan;

	if (nr_to_scan == 0)
		goto out;

	/* Can't recurse into the filesystem from this allocation context */
	if (!(sc->gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&qd_lru_lock);
	while (nr_to_scan && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		/* Anything on the LRU should have no pending state */
		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		/* Drop the lock around the free; the qd is already unlinked */
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr_to_scan--;
	}
	spin_unlock(&qd_lru_lock);

out:
	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}

123
static u64 qd2offset(struct gfs2_quota_data *qd)
David Teigland's avatar
David Teigland committed
124
{
125
	u64 offset;
David Teigland's avatar
David Teigland committed
126

127
	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
David Teigland's avatar
David Teigland committed
128
129
130
131
132
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

133
/*
 * qd_alloc - allocate and initialize an in-core quota_data struct
 * @sdp: the filesystem
 * @user: non-zero for a user quota, zero for a group quota
 * @id: the uid or gid
 * @qdp: filled with the new struct on success
 *
 * Returns: 0, -ENOMEM, or an error from gfs2_glock_get()
 */
static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int ret;

	/* Zeroed allocation: flags, counters and pointers start cleared */
	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (qd == NULL)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;	/* no quota-change slot assigned yet */
	INIT_LIST_HEAD(&qd->qd_reclaim);

	/* The glock number encodes both the ID and user/group kind */
	ret = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (ret) {
		kmem_cache_free(gfs2_quotad_cachep, qd);
		return ret;
	}

	*qdp = qd;
	return 0;
}

164
/*
 * qd_get - look up (or install) the quota_data for (user, id)
 *
 * Scans sdp->sd_quota_list under qd_lru_lock.  On a hit, takes a
 * reference and pulls the qd back off the global reclaim LRU if it
 * was sitting there.  On a miss, the lock is dropped, a new qd is
 * allocated and the scan retried; if another task installed one in
 * the meantime, the loser's spare allocation is discarded.
 */
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				/* Resurrect it if it was reclaimable */
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					/* Remove it from reclaim list */
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		/* Miss: install the qd allocated on the previous pass */
		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&qd_lru_lock);

		if (qd) {
			/* Lost a race with another inserter: free our spare */
			if (new_qd) {
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}

/* Take an additional reference on an already-referenced qd. */
static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sbd = qd->qd_gl->gl_sbd;

	/* A hold may only piggyback on an existing reference */
	gfs2_assert(sbd, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}

/*
 * qd_put - drop a reference; a qd reaching zero moves to the reclaim LRU
 *
 * atomic_dec_and_lock() only takes qd_lru_lock when the count hits
 * zero, so the common path is lock-free.
 */
static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}

/*
 * slot_get - assign this qd a slot in the per-node quota-change file
 *
 * If a slot is already assigned, just bumps qd_slot_count.  Otherwise
 * scans the slot bitmap (under qd_lru_lock) for a free bit.
 *
 * Returns: 0 on success, -ENOSPC if the quota-change file is full
 */
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	/* Optimistic increment; undone on the fail path below */
	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	/* Scan chunk by chunk, byte by byte, for a byte with a clear bit */
	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	/* The bitmap can extend past the number of usable slots */
	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

281
	spin_lock(&qd_lru_lock);
David Teigland's avatar
David Teigland committed
282
283
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
284
	spin_unlock(&qd_lru_lock);
David Teigland's avatar
David Teigland committed
285
286
287
288
289
290
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

291
	spin_lock(&qd_lru_lock);
David Teigland's avatar
David Teigland committed
292
293
294
295
296
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
297
	spin_unlock(&qd_lru_lock);
David Teigland's avatar
David Teigland committed
298
299
300
301
302
}

/*
 * bh_get - read and pin the quota-change file block holding this qd's slot
 *
 * The first holder maps and reads the buffer and records the pointer
 * to this slot's gfs2_quota_change record; subsequent holders just
 * bump qd_bh_count.  Serialized by sd_quota_mutex.
 *
 * Returns: 0 on success or a -ve error
 */
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	/* Optimistic increment; undone on the fail paths below */
	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	/* Translate slot number to (block, record offset) in the qc file */
	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	/* Point directly at this slot's record within the buffer */
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;	/* undo the optimistic increment */
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

351
	mutex_lock(&sdp->sd_quota_mutex);
David Teigland's avatar
David Teigland committed
352
353
354
355
356
357
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
358
	mutex_unlock(&sdp->sd_quota_mutex);
David Teigland's avatar
David Teigland committed
359
360
361
362
363
364
365
366
367
368
369
370
371
}

/*
 * qd_fish - find one quota change that needs syncing to the quota file
 *
 * Picks the first dirty (QDF_CHANGE), unlocked qd not yet synced in
 * the current generation, marks it QDF_LOCKED and takes the qd/slot/bh
 * references the sync will need.  *qdp is set to the qd, or NULL if
 * nothing needs syncing.
 *
 * Returns: 0 or the error from bh_get() (references are unwound)
 */
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		/* Rotate to the tail so successive scans are round-robin */
		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		/* Snapshot the delta to sync while still under the lock */
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			/* Unwind the references taken above */
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

/*
 * qd_trylock - try to claim a specific qd for syncing
 *
 * Like qd_fish(), but targets one qd: if it is dirty and not already
 * being synced, marks it QDF_LOCKED and takes the qd/slot/bh
 * references.
 *
 * Returns: 1 if the qd was claimed, 0 otherwise
 */
static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	/* Rotate to the tail to keep qd_fish() scanning fair */
	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	/* Snapshot the delta to sync while still under the lock */
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		/* Unwind the references taken above */
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
453
454
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
David Teigland's avatar
David Teigland committed
455
456
457
458
459
460
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

461
/*
 * qdsb_get - acquire a qd together with its slot and buffer references
 *
 * Convenience wrapper: qd_get() + slot_get() + bh_get(), unwinding
 * whatever was acquired if a later step fails.
 */
static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	int ret;

	ret = qd_get(sdp, user, id, qdp);
	if (ret)
		return ret;

	ret = slot_get(*qdp);
	if (ret) {
		qd_put(*qdp);
		return ret;
	}

	ret = bh_get(*qdp);
	if (ret) {
		slot_put(*qdp);
		qd_put(*qdp);
		return ret;
	}

	return 0;
}

/* Release the references taken by qdsb_get(), in reverse order. */
static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

494
/*
 * gfs2_quota_hold - acquire the quota_data structs an operation will touch
 * @ip: the inode the operation applies to
 * @uid: a new owner uid (for chown-style ops) or NO_QUOTA_CHANGE
 * @gid: a new owner gid or NO_QUOTA_CHANGE
 *
 * Fills al->al_qd[] with up to four entries: the inode's current
 * uid/gid quotas plus, when they differ, the new uid and/or gid.
 * On any failure all acquired references are dropped via
 * gfs2_quota_unhold().
 *
 * Returns: 0 or a -ve error
 */
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	/* For ownership changes, also hold the destination uid's quota */
	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	/* ... and the destination gid's quota */
	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
544
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
545
	struct gfs2_alloc *al = ip->i_alloc;
David Teigland's avatar
David Teigland committed
546
547
548
549
550
551
552
553
554
555
556
557
558
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
559
560
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;
David Teigland's avatar
David Teigland committed
561
562
563
564

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
565
			return -1;
David Teigland's avatar
David Teigland committed
566
		else
567
			return 1;
David Teigland's avatar
David Teigland committed
568
	}
569
570
571
572
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;
David Teigland's avatar
David Teigland committed
573

574
	return 0;
David Teigland's avatar
David Teigland committed
575
576
}

577
/*
 * do_qc - record a usage delta in this node's quota-change file
 * @qd: the quota being adjusted
 * @change: signed block-count delta to add
 *
 * Updates the on-disk quota_change record (under sd_quota_mutex and
 * within the caller's transaction) and mirrors the running total in
 * qd->qd_change.  Reference counts track the dirty state: the first
 * nonzero total takes qd/slot holds, a return to zero drops them.
 */
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	/* First change for this qd: initialize the on-disk record */
	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	/* qd_change is read under qd_lru_lock elsewhere (e.g. need_sync) */
	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		/* Delta returned to zero: record is free, drop dirty holds */
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		/* Became dirty: pin the qd and its slot until synced */
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

617
/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record, or NULL to leave limits alone
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh, *dibh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q, *qp;
	int err, nbytes;
	u64 size;

	/* The quota file must hold real blocks before we can map pages */
	if (gfs2_is_stuffed(ip))
		gfs2_unstuff_dinode(ip, NULL);

	/* Read the current on-disk record, then apply the delta in core */
	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	qp = &q;
	qp->qu_value = be64_to_cpu(qp->qu_value);
	qp->qu_value += change;
	qp->qu_value = cpu_to_be64(qp->qu_value);
	qd->qd_qb.qb_value = qp->qu_value;
	if (fdq) {
		/* Limits are supplied in fs blocks; convert to fs-native
		 * units via sd_fsb2bb_shift */
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_warn = qp->qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_limit = qp->qu_limit;
		}
		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
			qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_value = qp->qu_value;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = qp;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Walk to the buffer covering 'offset' within this page */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ | REQ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	/* Only copy up to the page boundary; the tail (if any) is
	 * written on the next pass through get_a_page */
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	/* Update the disk inode timestamp and size (if extended) */
	err = gfs2_meta_inode_buffer(ip, &dibh);
	if (err)
		goto out;

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size)
		i_size_write(inode, size);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);
	mark_inode_dirty(inode);

out:
	return err;
unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

David Teigland's avatar
David Teigland committed
761
762
763
/*
 * do_sync - flush a batch of accumulated quota changes to the quota file
 * @num_qd: number of entries in qda
 * @qda: the quota_data structs to sync (claimed via qd_fish/qd_trylock)
 *
 * Takes all the quota glocks (in sorted order to avoid deadlock) plus
 * the quota inode glock, reserves blocks, then applies each pending
 * delta to the quota file and zeroes the local change record.
 *
 * Returns: 0 or a -ve error
 */
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			      &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	/* Sorted order gives a global lock-acquisition order */
	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	/* Count how many records will need new block allocations */
	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	al = gfs2_alloc_get(ip);
	if (!al) {
		error = -ENOMEM;
		goto out_gunlock;
	}
	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	al->al_requested = 1;
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	if (nalloc)
		al->al_requested += nalloc * (data_blocks + ind_blocks);
	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(al) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		/* The delta is now in the quota file; cancel the local one */
		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	/* qx counts how many quota glocks were successfully acquired */
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}

David Teigland's avatar
David Teigland committed
886
887
888
889
/*
 * do_glock - acquire a quota glock, refreshing the cached LVB if needed
 * @qd: the quota to lock
 * @force_refresh: non-zero to re-read the quota file unconditionally
 * @q_gh: holder filled with the shared lock on success
 *
 * Normally takes the glock shared and trusts the LVB.  If the LVB is
 * stale (bad magic) or a refresh is forced, the lock is promoted to
 * exclusive, update_qd() repopulates the LVB under the quota inode
 * glock, and the whole sequence restarts to end in the shared state.
 */
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		/* Promote to exclusive so we may rewrite the LVB */
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		/* Drop back and retake shared with the LVB now valid */
		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

931
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
David Teigland's avatar
David Teigland committed
932
{
933
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
934
	struct gfs2_alloc *al = ip->i_alloc;
935
	struct gfs2_quota_data *qd;
David Teigland's avatar
David Teigland committed
936
937
938
939
940
941
942
943
944
945
946
947
948
	unsigned int x;
	int error = 0;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
949
950
951
952
953
		int force = NO_FORCE;
		qd = al->al_qd[x];
		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
			force = FORCE;
		error = do_glock(qd, force, &al->al_qd_ghs[x]);
David Teigland's avatar
David Teigland committed
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
973
	s64 value;
David Teigland's avatar
David Teigland committed
974
975
976
977
978
979
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

980
	spin_lock(&qd_lru_lock);
David Teigland's avatar
David Teigland committed
981
	value = qd->qd_change;
982
	spin_unlock(&qd_lru_lock);
David Teigland's avatar
David Teigland committed
983
984
985
986
987
988
989
990

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
991
992
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
David Teigland's avatar
David Teigland committed
993
994
995
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
996
		value = div_s64(value, den);
997
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
998
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
David Teigland's avatar
David Teigland committed
999
1000
1001
1002
1003
1004
1005
1006
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
1007
	struct gfs2_alloc *al = ip->i_alloc;
David Teigland's avatar
David Teigland committed
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

1034
out:
David Teigland's avatar
David Teigland committed
1035
1036
1037
1038
1039
1040
1041
1042
1043
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

1044
	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
1045
1046
1047
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);
David Teigland's avatar
David Teigland committed
1048
1049
1050
1051

	return 0;
}

1052
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
David Teigland's avatar
David Teigland committed
1053
{
1054
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1055
	struct gfs2_alloc *al = ip->i_alloc;
David Teigland's avatar
David Teigland committed
1056
	struct gfs2_quota_data *qd;
1057
	s64 value;
David Teigland's avatar
David Teigland committed
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

        if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
                return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

1074
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
1075
		spin_lock(&qd_lru_lock);
David Teigland's avatar
David Teigland committed
1076
		value += qd->qd_change;
1077
		spin_unlock(&qd_lru_lock);
David Teigland's avatar
David Teigland committed
1078

1079
		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
David Teigland's avatar
David Teigland committed
1080
			print_message(qd, "exceeded");
1081
1082
1083
1084
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);

David Teigland's avatar
David Teigland committed
1085
1086
			error = -EDQUOT;
			break;
1087
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
1088
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
David Teigland's avatar
David Teigland committed
1089
			   time_after_eq(jiffies, qd->qd_last_warn +
1090
1091
					 gfs2_tune_get(sdp,
						gt_quota_warn_period) * HZ)) {
1092
1093
1094
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
David Teigland's avatar
David Teigland committed
1095
1096
1097
1098
1099
1100
1101
1102
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

1103
1104
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
David Teigland's avatar
David Teigland committed
1105
{
1106
	struct gfs2_alloc *al = ip->i_alloc;
David Teigland's avatar
David Teigland committed
1107
1108
1109
	struct gfs2_quota_data *qd;
	unsigned int x;

1110
	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
David Teigland's avatar
David Teigland committed
1111
		return;
1112
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
David Teigland's avatar
David Teigland committed
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
		}
	}
}

1125
int gfs2_quota_sync(struct super_block *sb, int type, int wait)
David Teigland's avatar
David Teigland committed
1126
{
1127
	struct gfs2_sbd *sdp = sb->s_fs_info;
David Teigland's avatar
David Teigland committed
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}

/* Periodic-sync entry point: a no-wait gfs2_quota_sync() with the
   signature the quota timeout callback expects. */
static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
{
	return gfs2_quota_sync(sb, type, 0);
}

1174
int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
David Teigland's avatar
David Teigland committed
1175
1176
1177
1178
1179
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

1180
	error = qd_get(sdp, user, id, &qd);
David Teigland's avatar
David Teigland committed
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}

1192
1193
1194
1195
1196
1197
1198
1199
1200
static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}

David Teigland's avatar
David Teigland committed
1201
1202
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
1203
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
Steven Whitehouse's avatar
Steven Whitehouse committed
1204
1205
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
David Teigland's avatar
David Teigland committed
1206
1207
	unsigned int x, slot = 0;
	unsigned int found = 0;
1208
1209
	u64 dblock;
	u32 extlen = 0;
David Teigland's avatar
David Teigland committed
1210
1211
	int error;

Steven Whitehouse's avatar
Steven Whitehouse committed
1212
	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1213
		return -EIO;
Steven Whitehouse's avatar
Steven Whitehouse committed
1214

David Teigland's avatar
David Teigland committed
1215
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1216
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
David Teigland's avatar
David Teigland committed
1217
1218
1219
1220

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
Josef Bacik's avatar
Josef Bacik committed
1221
				       sizeof(unsigned char *), GFP_NOFS);
David Teigland's avatar
David Teigland committed
1222
1223
1224
1225
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
Josef Bacik's avatar
Josef Bacik committed
1226
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
David Teigland's avatar
David Teigland committed
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
1237
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);