/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/percpu.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index           */
	unsigned nhash;			/* Index within current bucket */
	struct gfs2_sbd *sdp;		/* incore superblock           */
	struct gfs2_glock *gl;		/* current glock struct        */
	loff_t last_pos;		/* last position               */
	char string[512];		/* scratch space               */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&gl_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&gl_hash_table[hash]);
}

static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}

void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void __gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	__gfs2_glock_remove_from_lru(gl);
	spin_unlock(&lru_lock);
}

/**
 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * If the glock is demotable, then we add it (or move it) to the end
 * of the glock LRU list.
 */

static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	if (demote_ok(gl))
		gfs2_glock_add_to_lru(gl);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
		__gfs2_glock_remove_from_lru(gl);
		spin_unlock(&lru_lock);
		spin_lock_bucket(gl->gl_hash);
		hlist_bl_del_rcu(&gl->gl_list);
		spin_unlock_bucket(gl->gl_hash);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
		trace_gfs2_glock_put(gl);
		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
	}
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (atomic_inc_not_zero(&gl->gl_ref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
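
/*
 * Editor's illustration (not part of the original source): with the glock
 * held in LM_ST_SHARED, a second LM_ST_SHARED request is granted at once
 * because the states match, while an LM_ST_EXCLUSIVE request must wait
 * until it reaches the head of the queue and the current holders drain.
 */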

static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 * @gl: The glock
 * @ret: The status from the DLM, or zero
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	if (new_state != gl->gl_target)
		/* shorten our minimum hold time */
		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
				       GL_GLOCK_MIN_HOLD);
	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	set_bit(GLF_BLOCKING, &gl->gl_flags);
	if ((gl->gl_req == LM_ST_UNLOCKED) ||
	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
		clear_bit(GLF_BLOCKING, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (sdp->sd_lockstruct.ls_ops->lm_lock)	{
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		GLOCK_BUG_ON(gl, ret);
	} else { /* lock_nolock */
		finish_xmote(gl, target);
568 569 570
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}
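
/*
 * Editor's summary (not part of the original source): run_queue() either
 * starts a pending demote, calling do_xmote() towards gl_demote_state, or
 * promotes waiting holders via do_promote(). When do_promote() reports a
 * blocked waiter at the head of the queue (return value 1), the glock is
 * instead transitioned towards that waiter's requested state.
 */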

static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr, 1);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;

		holdtime = gl->gl_tchange + gl->gl_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;

		if (!delay) {
			clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
			set_bit(GLF_DEMOTE, &gl->gl_flags);
		}
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay)
		gfs2_glock_put(gl);
	else {
		if (gl->gl_name.ln_type != LM_TYPE_INODE)
			delay = 0;
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
			gfs2_glock_put(gl);
	}
	if (drop_ref)
		gfs2_glock_put(gl);
}
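
/*
 * Editor's note (not part of the original source): every successfully
 * queued gl_work item owns one glock reference, so glock_work_func()
 * drops a reference when it does not requeue itself, plus an extra one
 * (drop_ref) once a reply from the lock module has been consumed.
 */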

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_sbd = sdp;
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	gl->gl_dstamp = ktime_set(0, 0);
	preempt_disable();
	/* We use the global stats to estimate the initial per-glock stats */
	gl->gl_stats = this_cpu_ptr(sdp->sd_lkstats)->lkstats[glops->go_type];
	preempt_enable();
	gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
	gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}
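
/*
 * Example (editor's sketch, not from the original file): a typical
 * lookup-or-create of an inode glock, pairing the reference returned
 * here with a later gfs2_glock_put():
 *
 *	struct gfs2_glock *gl;
 *	int error;
 *
 *	error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops,
 *			       CREATE, &gl);
 *	if (error)
 *		return error;
 *	...
 *	gfs2_glock_put(gl);
 */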

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}
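
/*
 * Example (editor's sketch, not from the original file): the usual holder
 * life cycle; gfs2_glock_nq() and gfs2_glock_dq() are defined further down
 * in glock.c:
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (error) {
 *		gfs2_holder_uninit(&gh);
 *		return error;
 *	}
 *	...
 *	gfs2_glock_dq(&gh);
 *	gfs2_holder_uninit(&gh);
 */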

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	unsigned long time1 = jiffies;

	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
	if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
		/* Lengthen the minimum hold time. */
		gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
					      GL_GLOCK_HOLD_INCR,
					      GL_GLOCK_MAX_HOLD);
}
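
/*
 * Editor's note (not part of the original source): this adaptive scheme
 * pairs with state_change() above; waits of more than a second grow the
 * glock's minimum hold time by GL_GLOCK_HOLD_INCR, while demotes away
 * from the requested target shrink it by GL_GLOCK_HOLD_DECR.
 */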

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}
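
/*
 * Editor's note (not part of the original source): a non-zero @delay sets
 * GLF_PENDING_DEMOTE, deferring the demote until glock_work_func() sees
 * the minimum hold time expire, while a zero @delay requests an immediate
 * demote via GLF_DEMOTE.
 */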

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_puts(seq, gi->string);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 * 
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);