/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

struct gfs2_glock_iter {
	int hash;			/* hash bucket index         */
	struct gfs2_sbd *sdp;		/* incore superblock         */
	struct gfs2_glock *gl;		/* current glock struct      */
	char string[512];		/* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct kmem_cache *cachep = gfs2_glock_cachep;

	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	if (mapping)
		cachep = gfs2_glock_aspace_cachep;
	sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
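	/*
	 * A glock looks reclaimable when nothing objects to a demote and the
	 * only remaining references are our own (inode glocks seem to be
	 * allowed one extra reference, presumably held by the inode itself).
	 */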
	int may_reclaim;
	may_reclaim = (demote_ok(gl) &&
		       (atomic_read(&gl->gl_ref) == 1 ||
			(gl->gl_name.ln_type == LM_TYPE_INODE &&
			 atomic_read(&gl->gl_ref) <= 2)));
	spin_lock(&lru_lock);
	if (list_empty(&gl->gl_lru) && may_reclaim) {
		list_add_tail(&gl->gl_lru, &lru_list);
		atomic_inc(&lru_count);
	}
	spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
	gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;

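	/*
	 * The final reference is dropped while holding lru_lock, so a glock
	 * whose refcount has reached zero can never be found on the LRU list.
	 */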
	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
		hlist_del(&gl->gl_list);
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
		}
		spin_unlock(&lru_lock);
		write_unlock(gl_lock_addr(gl->gl_hash));
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	spin_lock(&gl->gl_spin);
	gfs2_glock_schedule_for_reclaim(gl);
	spin_unlock(&gl->gl_spin);
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the hash bucket index
 * @sdp: the incore superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 * 
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		break;
	}
	return 0;
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: the glock
 * @ret: the status from the lock module, or 0 to fail only queued try locks
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

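	/*
	 * The glock keeps an extra reference for as long as it is held in any
	 * locked state; take or drop that reference when the state crosses
	 * the LM_ST_UNLOCKED boundary.
	 */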
	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
				 unsigned int req_state,
				 unsigned int flags)
{
	int ret = LM_OUT_ERROR;

	if (!sdp->sd_lockstruct.ls_ops->lm_lock)
		return req_state == LM_ST_UNLOCKED ? 0 : req_state;

	if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock,
							 req_state, flags);
	return ret;
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	BUG_ON(gl->gl_state == target);
	BUG_ON(gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
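	/*
	 * A conversion from a shared or deferred state to anything other than
	 * unlocked is issued as a "try" lock with one callback, apparently so
	 * that a DLM conversion deadlock surfaces as a cancelled or unlocked
	 * reply, which finish_xmote() handles by retrying via LM_ST_UNLOCKED.
	 */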
	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
	    gl->gl_state == LM_ST_DEFERRED) &&
	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		lck_flags |= LM_FLAG_TRY_1CB;
	ret = gfs2_lm_lock(sdp, gl, target, lck_flags);

	if (!(ret & LM_OUT_ASYNC)) {
		finish_xmote(gl, ret);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	} else {
		GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
	}
	spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

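	/*
	 * GLF_LOCK serialises the glock state machine; if it is already set,
	 * whoever owns it will finish the outstanding work.
	 */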
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip = NULL;
	struct inode *inode;
	u64 no_addr = 0;

	spin_lock(&gl->gl_spin);
	ip = (struct gfs2_inode *)gl->gl_object;
	if (ip)
		no_addr = ip->i_no_addr;
	spin_unlock(&gl->gl_spin);
	if (ip) {
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
		if (inode) {
			d_prune_aliases(inode);
			iput(inode);
		}
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	struct gfs2_holder *gh;
	int drop_ref = 0;

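	/*
	 * GLF_FROZEN marks a reply that was held back (e.g. while recovery
	 * blocks new lock activity); a waiter with LM_FLAG_NOEXP may thaw the
	 * glock so the reply is processed below.
	 */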
	if (unlikely(test_bit(GLF_FROZEN, &gl->gl_flags))) {
		spin_lock(&gl->gl_spin);
		gh = find_first_waiter(gl);
		if (gh && (gh->gh_flags & LM_FLAG_NOEXP) &&
		    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
			set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
	}

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;
		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;
		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
	else
		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

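	/*
	 * Another thread may have inserted a glock for this name while we
	 * were initialising ours; if so, throw ours away and use theirs.
	 */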
	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero for an immediate demote, non-zero to mark the demote as pending
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

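	/*
	 * LM_ST_EXCLUSIVE in gl_demote_state means no demote is pending.
	 * A second, conflicting demote request widens the target to
	 * LM_ST_UNLOCKED, which satisfies both callers.
	 */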
	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		vsprintf(gi->string, fmt, args);
		seq_printf(seq, gi->string);
	} else {
		printk(KERN_ERR " ");
		vprintk(fmt, args);
	}
	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 * 
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	trace_gfs2_glock_queue(gh, 1);
	list_add_tail(&gh->gh_list, insert_pt);
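	/*
	 * A priority request has been queued ahead of the other waiters.
	 * If the holder at the head of the queue is not itself a priority
	 * request, ask the lock module to cancel the pending operation so
	 * the priority request can be serviced sooner.
	 */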
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);
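	/*
	 * fast_path means nothing is left to do for this glock; otherwise
	 * kick the workqueue (after go_min_hold_time if a demote is merely
	 * pending) to deal with the outstanding demote.
	 */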
	if (likely(fast_path))
		return;

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}
