/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>
#include <linux/list_sort.h>
#include <linux/lockref.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index           */
	unsigned nhash;			/* Index within current bucket */
	struct gfs2_sbd *sdp;		/* incore superblock           */
	struct gfs2_glock *gl;		/* current glock struct        */
	loff_t last_pos;		/* last position               */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @lock: The glock number
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	} else {
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(gfs2_glock_cachep, gl);
	}
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

static void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
	lockref_get(&gl->gl_lockref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	__gfs2_glock_remove_from_lru(gl);
	spin_unlock(&lru_lock);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (lockref_put_or_lock(&gl->gl_lockref))
		return;

	lockref_mark_dead(&gl->gl_lockref);

	spin_lock(&lru_lock);
	__gfs2_glock_remove_from_lru(gl);
	spin_unlock(&lru_lock);
	spin_unlock(&gl->gl_lockref.lock);
	spin_lock_bucket(gl->gl_hash);
	hlist_bl_del_rcu(&gl->gl_list);
	spin_unlock_bucket(gl->gl_hash);
	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
	trace_gfs2_glock_put(gl);
	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
}
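
/*
 * Usage sketch (illustrative only): a hypothetical caller that needs a
 * temporary extra reference pairs these calls up directly:
 *
 *	gfs2_glock_hold(gl);
 *	...
 *	gfs2_glock_put(gl);
 *
 * Only the final put, when the lockref drops to zero, takes the slow
 * path above: the glock leaves the LRU and the hash table and is handed
 * back to the lock module via lm_put_lock(), which ends up in
 * gfs2_glock_free() and the RCU-deferred gfs2_glock_dealloc().
 */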

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&gl->gl_lockref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
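
/*
 * A few concrete cases of the policy above (illustrative, not an
 * exhaustive table; "head" is the first holder on gl_holders):
 *
 *	gl_state	request			may_grant?
 *	LM_ST_SHARED	LM_ST_SHARED		yes (states match)
 *	LM_ST_EXCLUSIVE	LM_ST_SHARED		only if the head holder is
 *						also asking for LM_ST_SHARED
 *	any but UN	any + LM_FLAG_ANY	yes, any locked state will
 *						do (subject to the
 *						EXCLUSIVE checks above)
 *	LM_ST_SHARED	LM_ST_EXCLUSIVE		no, the glock must first be
 *						promoted via do_xmote()
 */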

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
		if (held2)
			gl->gl_lockref.count++;
		else
			gl->gl_lockref.count--;
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);
	if (glops->go_sync)
		glops->go_sync(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		if (ret) {
			printk(KERN_ERR "GFS2: lm_lock ret %d\n", ret);
			GLOCK_BUG_ON(gl, 1);
		}
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gl->gl_lockref.count++;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gl->gl_lockref.count--;
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay)
		gfs2_glock_put(gl);
	else {
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
			gfs2_glock_put(gl);
	}
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_sbd = sdp;
	gl->gl_flags = 0;
	gl->gl_name = name;
	gl->gl_lockref.count = 1;
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	gl->gl_dstamp = ktime_set(0, 0);
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->private_data = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kfree(gl->gl_lksb.sb_lvbptr);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}
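
/*
 * A minimal usage sketch (hypothetical caller; gfs2_inode_glops and the
 * CREATE flag come from glops.h and gfs2.h respectively):
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops,
 *			       CREATE, &gl);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_put(gl);	(drops the reference taken by _get)
 *
 * Note the create race above: if two callers race to insert the same
 * name, the loser frees its new glock and returns the winner's.
 */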

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}
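
/*
 * Holders normally follow this pattern (a sketch; the
 * gfs2_glock_nq_init() helper in glock.h wraps the first two calls):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		...		the glock is held here ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */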

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
	return gh->gh_error;
}
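
/*
 * With GL_ASYNC the enqueue and the wait are split, so a caller can
 * issue a request and collect the result later (a sketch; per the
 * comment on gfs2_glock_nq() below, the enqueue itself never returns
 * an error for GL_ASYNC):
 *
 *	gfs2_holder_init(gl, state, flags | GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);
 *	...			overlap other work here ...
 *	error = gfs2_glock_wait(&gh);
 */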

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay, bool remote)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, remote);
	trace_gfs2_demote_rq(gl, remote);
}
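
/*
 * A sketch of the two directions this is called from (the callers
 * appear later in this file and take gl->gl_spin first):
 *
 *	remote == true:	 the lock module reports that another node wants
 *			 the lock, e.g.
 *			 handle_callback(gl, LM_ST_UNLOCKED, 0, true);
 *	remote == false: a local path (e.g. the glock shrinker) queues
 *			 the demote itself.
 */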

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_futile = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_futile = !may_grant(gl, gh);
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_futile &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	printk(KERN_ERR "original: %pSR\n", (void *)gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	printk(KERN_ERR "new: %pSR\n", (void *)gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	gfs2_dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))