/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index           */
	unsigned nhash;			/* Index within current bucket */
	struct gfs2_sbd *sdp;		/* incore superblock           */
	struct gfs2_glock *gl;		/* current glock struct        */
	loff_t last_pos;		/* last position               */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}
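
/*
 * Illustrative sketch (not used elsewhere in this file): computing the
 * bucket for an inode's glock. "ip" stands for a hypothetical
 * struct gfs2_inode; the final mask in gl_hash() guarantees the result
 * is always < GFS2_GL_HASH_SIZE.
 *
 *	struct lm_lockname name = {
 *		.ln_number = ip->i_no_addr,
 *		.ln_type = LM_TYPE_INODE,
 *	};
 *	unsigned int bucket = gl_hash(sdp, &name);
 */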

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}


void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	__gfs2_glock_remove_from_lru(gl);
	spin_unlock(&lru_lock);
}

/**
 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * If the glock is demotable, then we add it (or move it) to the end
 * of the glock LRU list.
 */

static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	if (demote_ok(gl))
		gfs2_glock_add_to_lru(gl);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
		__gfs2_glock_remove_from_lru(gl);
		spin_unlock(&lru_lock);
		spin_lock_bucket(gl->gl_hash);
		hlist_bl_del_rcu(&gl->gl_list);
		spin_unlock_bucket(gl->gl_hash);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
		trace_gfs2_glock_put(gl);
		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
	}
}
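
/*
 * Reference-counting sketch: every gfs2_glock_hold() must be balanced by
 * exactly one gfs2_glock_put() (or gfs2_glock_put_nolock() when lru_lock
 * is already held and the caller owns an additional reference):
 *
 *	gfs2_glock_hold(gl);
 *	... work that must keep gl alive ...
 *	gfs2_glock_put(gl);	(may be the final put, freeing gl via RCU)
 */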

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (atomic_inc_not_zero(&gl->gl_ref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
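
/*
 * Worked example of may_grant(): assume the glock and its granted head
 * holder are both in LM_ST_SHARED. For hypothetical waiting holders:
 *
 *	requesting LM_ST_SHARED       -> 1 (state already matches)
 *	requesting with LM_FLAG_ANY   -> 1 (the glock is not unlocked)
 *	requesting LM_ST_EXCLUSIVE    -> 0 (EX behind a granted holder
 *					   must wait for a demote)
 */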

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: LM_OUT_* status from the lower layer, or zero to fail only
 *       queued "try" locks
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 * 
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		GLOCK_BUG_ON(gl, ret);
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay)
		gfs2_glock_put(gl);
	else {
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
			gfs2_glock_put(gl);
	}
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_sbd = sdp;
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	gl->gl_dstamp = ktime_set(0, 0);
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	memset(gl->gl_lvb, 0, 32 * sizeof(char));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}
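
/*
 * Usage sketch (illustrative; error handling trimmed). A caller needing
 * the inode glock for block no_addr might do the following, with
 * gfs2_inode_glops from glops.h and CREATE from gfs2.h:
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
 *	if (error)
 *		return error;
 *	... use gl, normally via a holder ...
 *	gfs2_glock_put(gl);	(drops the reference gfs2_glock_get took)
 */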

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}
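
/*
 * Holder lifecycle sketch (illustrative; gfs2_glock_nq() and
 * gfs2_glock_dq() are declared in glock.h, error handling trimmed):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);	(queue and wait for the grant)
 *	if (!error) {
 *		... read under the shared lock ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);	(drops the reference taken by init)
 */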

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
}
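
/*
 * Note that the hold time adapts in both directions: a holder that waited
 * longer than HZ jiffies above lengthens gl_hold_time by
 * GL_GLOCK_HOLD_INCR (capped at GL_GLOCK_MAX_HOLD), while state_change()
 * shortens it by GL_GLOCK_HOLD_DECR (floored at GL_GLOCK_MIN_HOLD) when a
 * reply lands short of its target. Contended glocks are thus held longer
 * between demotes; idle ones decay back towards the minimum. The
 * constants live in glock.h.
 */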

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}

/**
933
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		seq_vprintf(seq, fmt, args);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}
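
/*
 * Usage sketch: a single call site serves both the debugfs dump
 * (seq != NULL) and the console error path (seq == NULL). The format
 * below is merely illustrative of the glock dump style, not verbatim:
 *
 *	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s\n",
 *		       state, type, number, flags);
 */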

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 * 
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);