/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index         */
	struct gfs2_sbd *sdp;		/* incore superblock         */
	struct gfs2_glock *gl;		/* current glock struct      */
	char string[512];		/* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn a glock number into a hash bucket number
 * @sdp: The superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	struct hlist_bl_head *bl = &gl_hash_table[hash];
	bit_spin_lock(0, (unsigned long *)bl);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	struct hlist_bl_head *bl = &gl_hash_table[hash];
	__bit_spin_unlock(0, (unsigned long *)bl);
}

void gfs2_glock_free(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
	struct gfs2_sbd *sdp = gl->gl_sbd;

	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);

	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	/* assert_spin_locked(&gl->gl_spin); */

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (test_bit(GLF_LFLUSH, &gl->gl_flags))
		return 0;
	if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
	    !list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

/**
 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * If the glock is demotable, then we add it (or move it) to the end
 * of the glock LRU list.
 */

static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	if (demote_ok(gl)) {
		spin_lock(&lru_lock);

		if (!list_empty(&gl->gl_lru))
			list_del_init(&gl->gl_lru);
		else
			atomic_inc(&lru_count);

		list_add_tail(&gl->gl_lru, &lru_list);
		spin_unlock(&lru_lock);
	}
}

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	__gfs2_glock_schedule_for_reclaim(gl);
	spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (atomic_dec_and_test(&gl->gl_ref)) {
		spin_lock_bucket(gl->gl_hash);
		hlist_bl_del_rcu(&gl->gl_list);
		spin_unlock_bucket(gl->gl_hash);
		spin_lock(&lru_lock);
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
		}
		spin_unlock(&lru_lock);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
		trace_gfs2_glock_put(gl);
		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
	}
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (atomic_inc_not_zero(&gl->gl_ref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
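
/*
 * A few worked cases of the rules above (illustrative comment only,
 * assuming GL_EXACT is not set on the new request):
 *
 *	head holder: SH granted, glock state SH, new gh: SH -> granted
 *	head holder: SH granted, glock state SH, new gh: EX -> must wait
 *	glock state DF, new gh: SH with LM_FLAG_ANY set      -> granted
 */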

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM (zero simply fails any queued "try" locks)
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
	    gl->gl_state == LM_ST_DEFERRED) &&
	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		lck_flags |= LM_FLAG_TRY_1CB;

	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		GLOCK_BUG_ON(gl, ret);
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;
		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;
		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kmem_cache_free(cachep, gl);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}
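
/*
 * Example (sketch only, not part of the build): a typical caller finds
 * or creates the glock for an on-disk inode and stashes it in the
 * incore inode; "no_addr" and "ip" are illustrative names here:
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, 1, &gl);
 *	if (error)
 *		return error;
 *	ip->i_gl = gl;
 */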

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @delay: zero to demote immediately
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_printf(seq, "%s", gi->string);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}
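
/*
 * Example (sketch only): pass the debugfs seq_file when dumping glock
 * state, or NULL to route the message to the kernel log instead; "str"
 * is an illustrative variable:
 *
 *	gfs2_print_dbg(seq, " H: %s\n", str);	(appended to the seq_file)
 *	gfs2_print_dbg(NULL, " H: %s\n", str);	(printk at KERN_ERR)
 */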

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	trace_gfs2_glock_queue(gh, 1);
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

1021
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
David Teigland's avatar
David Teigland committed
1022 1023 1024 1025
		return -EIO;

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
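
/*
 * Example (sketch only): the common synchronous pattern uses the
 * gfs2_glock_nq_init() helper from glock.h, which combines
 * gfs2_holder_init() and gfs2_glock_nq(), and is undone by
 * gfs2_glock_dq_uninit():
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	(... access the protected object under the shared lock ...)
 *	gfs2_glock_dq_uninit(&gh);
 */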

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
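
/*
 * Example (sketch only): with GL_ASYNC the enqueue returns without
 * waiting, so a caller can overlap other work and then collect the
 * result with gfs2_glock_wait():
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);
 *	while (!gfs2_glock_poll(&gh))
 *		(... do other work ...)
 *	error = gfs2_glock_wait(&gh);
 */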

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}
		if (list_empty(&gl->gl_holders) &&
		    !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
			fast_path = 1;
	}
	__gfs2_glock_schedule_for_reclaim(gl);
	trace_gfs2_glock_queue(gh, 0);
	spin_unlock(&gl->gl_spin);