/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>
#include <linux/rhashtable.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	struct gfs2_sbd *sdp;		/* incore superblock           */
	struct rhashtable_iter hti;	/* rhashtable iterator         */
	struct gfs2_glock *gl;		/* current glock struct        */
	loff_t last_pos;		/* last position               */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)

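/* Parameters for the glock rhashtable, keyed by struct lm_lockname.
 * The nelem_hint of 3/4 of GFS2_GL_HASH_SIZE presizes the table
 * (presumably to match the old fixed-size hash table); the rhashtable
 * still resizes itself as the number of glocks grows.
 */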
static struct rhashtable_params ht_parms = {
	.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
	.key_len = sizeof(struct lm_lockname),
	.key_offset = offsetof(struct gfs2_glock, gl_name),
	.head_offset = offsetof(struct gfs2_glock, gl_node),
};

static struct rhashtable gl_hash_table;

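/**
 * gfs2_glock_free - free a glock that is no longer in use
 * @gl: The glock to free
 *
 * Returns the glock (and its LVB, if any) to the appropriate cache and
 * wakes up any waiter on sd_glock_wait once the last outstanding glock
 * has been disposed of.
 */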
void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}


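/**
 * gfs2_glock_add_to_lru - add/move a glock to the tail of the LRU list
 * @gl: The glock
 *
 * If the glock is already on the LRU it is simply moved to the
 * most-recently-used (tail) position.
 */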
void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

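/**
 * gfs2_glock_remove_from_lru - take a glock off the LRU list, if present
 * @gl: The glock
 */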
static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

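	/* lockref_put_or_lock() drops a reference unless it is the last
	 * one; in that case it returns false with gl_lockref.lock held,
	 * and we tear the glock down below. */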
	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	lockref_mark_dead(&gl->gl_lockref);

	gfs2_glock_remove_from_lru(gl);
	spin_unlock(&gl->gl_lockref.lock);
	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
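/**
 * gfs2_holder_wake - clear HIF_WAIT and wake anyone waiting on the holder
 * @gh: The holder
 */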

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_atomic();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM, or zero to fail queued "try" locks
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 * 
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_lockref.lock);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_lockref.lock);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

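/**
 * gfs2_demote_wake - reset the demote state and wake any waiters
 * @gl: The glock
 *
 * gl_demote_state == LM_ST_EXCLUSIVE is used here as the "no demote
 * pending" sentinel, as in handle_callback() below.
 */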
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_atomic();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_lockref.lock);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			pr_err("wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_lockref.lock);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_lockref.lock);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_lockref.lock);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
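	/* Note: only an unlock (LM_ST_UNLOCKED), a downconversion from
	 * LM_ST_EXCLUSIVE, or a "try" request is treated as non-blocking
	 * below; everything else keeps GLF_BLOCKING set. */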
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_lockref.lock);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret) {
			pr_err("lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, 1);
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_lockref.lock);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	gl->gl_lockref.count++;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gl->gl_lockref.count--;
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_atomic();
	return;
}

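/**
 * delete_work_func - look up the inode a glock refers to and prune its
 * dcache aliases, allowing an unlinked inode to be deallocated
 * @work: The gl_delete work embedded in a struct gfs2_glock
 */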
static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

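/**
 * glock_work_func - process pending glock state machine work
 * @work: The gl_work delayed work embedded in a struct gfs2_glock
 *
 * Handles a pending DLM reply, turns a pending demote into a real one
 * once the minimum hold time has expired, and runs the queue.
 */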
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_lockref.lock);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_lockref.lock);
	if (!delay)
		gfs2_glock_put(gl);
	else {
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
			gfs2_glock_put(gl);
	}
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number,
				    .ln_type = glops->go_type,
				    .ln_sbd = sdp };
	struct gfs2_glock *gl, *tmp = NULL;
	struct address_space *mapping;
	struct kmem_cache *cachep;
	int ret, tries = 0;

	gl = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
	if (gl && !lockref_get_not_dead(&gl->gl_lockref))
		gl = NULL;

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_NOFS);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_node.next = NULL;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_ops = glops;
	gl->gl_dstamp = ktime_set(0, 0);
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->writeback_index = 0;
	}

again:
	ret = rhashtable_lookup_insert_fast(&gl_hash_table, &gl->gl_node,
					    ht_parms);
	if (ret == 0) {
		*glp = gl;
		return 0;
	}

	if (ret == -EEXIST) {
		ret = 0;
		tmp = rhashtable_lookup_fast(&gl_hash_table, &name, ht_parms);
		if (tmp == NULL || !lockref_get_not_dead(&tmp->gl_lockref)) {
			if (++tries < 100) {
				cond_resched();
				goto again;
			}
			tmp = NULL;
			ret = -ENOMEM;
		}
	} else {
		WARN_ON_ONCE(ret);
	}
	kfree(gl->gl_lksb.sb_lvbptr);
	kmem_cache_free(cachep, gl);
	atomic_dec(&sdp->sd_glock_disposal);
	*glp = tmp;

	return ret;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = _RET_IP_;
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = _RET_IP_;
	put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
	return gh->gh_error;
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately; otherwise pending demote
 * @remote: true if this came from a different cluster node
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}

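/**
 * gfs2_print_dbg - print to a seq_file, or via pr_err() if @seq is NULL
 * @seq: The seq_file to print into, or NULL
 * @fmt: The printf()-style format
 */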
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		pr_err("%pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 * 
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_lockref.lock);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_lockref.lock);
	}
	return;

trap_recursive:
	pr_err("original: %pSR\n", (void *)gh2->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh2->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	pr_err("new: %pSR\n", (void *)gh->gh_ip);
	pr_err("pid: %d\n", pid_nr(gh->gh_owner_pid));
	pr_err("lock type: %d req lock state : %d\n",