glock.c

/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

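/* Private state for the debugfs glock dump; one instance per open seq_file */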
struct gfs2_glock_iter {
	int hash;			/* hash bucket index         */
	struct gfs2_sbd *sdp;		/* incore superblock         */
	struct gfs2_glock *gl;		/* current glock struct      */
	char string[512];		/* scratch space             */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The incore superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

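/*
 * Each hash chain is protected by a bit spin lock taken on bit 0 of the
 * bucket's hlist_bl_head pointer.  Lookups walk the chains under RCU, so
 * only writers (add/remove) need to take the bucket lock.
 */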
static inline void spin_lock_bucket(unsigned int hash)
{
	struct hlist_bl_head *bl = &gl_hash_table[hash];
	bit_spin_lock(0, (unsigned long *)bl);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	struct hlist_bl_head *bl = &gl_hash_table[hash];
	__bit_spin_unlock(0, (unsigned long *)bl);
}

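/*
 * RCU callback used by gfs2_glock_free(): returns the glock to whichever
 * slab cache it was allocated from (with or without an embedded aspace).
 */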
static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	/* assert_spin_locked(&gl->gl_spin); */

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (test_bit(GLF_LFLUSH, &gl->gl_flags))
		return 0;
	if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
	    !list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}


void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

/**
 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 * If the glock is demotable, then we add it (or move it) to the end
 * of the glock LRU list.
 */

static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	if (demote_ok(gl))
		gfs2_glock_add_to_lru(gl);
}

/**
 * gfs2_glock_put_nolock() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * This function should only be used if the caller has its own reference
 * to the glock, in addition to the one it is dropping.
 */

void gfs2_glock_put_nolock(struct gfs2_glock *gl)
{
	if (atomic_dec_and_test(&gl->gl_ref))
		GLOCK_BUG_ON(gl, 1);
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

void gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct address_space *mapping = gfs2_glock2aspace(gl);

	if (atomic_dec_and_test(&gl->gl_ref)) {
		spin_lock_bucket(gl->gl_hash);
		hlist_bl_del_rcu(&gl->gl_list);
		spin_unlock_bucket(gl->gl_hash);
		spin_lock(&lru_lock);
		if (!list_empty(&gl->gl_lru)) {
			list_del_init(&gl->gl_lru);
			atomic_dec(&lru_count);
		}
		spin_unlock(&lru_lock);
		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
		GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
		trace_gfs2_glock_put(gl);
		sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
	}
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;
		if (atomic_inc_not_zero(&gl->gl_ref))
			return gl;
	}

	return NULL;
}

/**
 * may_grant - check if its ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if its ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}

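/*
 * Wake anyone sleeping in gfs2_glock_wait(); the barrier after clearing
 * HIF_WAIT pairs with the bit test in wait_on_holder().
 */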
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

/**
 * do_error - Something unexpected has happened during a lock request
 *
 */

static inline void do_error(struct gfs2_glock *gl, const int ret)
{
	struct gfs2_holder *gh, *tmp;

	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (ret & LM_OUT_ERROR)
			gh->gh_error = -EIO;
		else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			gh->gh_error = GLR_TRYFAILED;
		else
			continue;
		list_del_init(&gh->gh_list);
		trace_gfs2_glock_queue(gh, 0);
		gfs2_holder_wake(gh);
	}
}

/**
 * do_promote - promote as many requests as possible on the current queue
 * @gl: The glock
 *
 * Returns: 1 if there is a blocked holder at the head of the list, or 2
 *          if a type specific operation is underway.
 */

static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh, *tmp;
	int ret;

restart:
	list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			continue;
		if (may_grant(gl, gh)) {
			if (gh->gh_list.prev == &gl->gl_holders &&
			    glops->go_lock) {
				spin_unlock(&gl->gl_spin);
				/* FIXME: eliminate this eventually */
				ret = glops->go_lock(gh);
				spin_lock(&gl->gl_spin);
				if (ret) {
					if (ret == 1)
						return 2;
					gh->gh_error = ret;
					list_del_init(&gh->gh_list);
					trace_gfs2_glock_queue(gh, 0);
					gfs2_holder_wake(gh);
					goto restart;
				}
				set_bit(HIF_HOLDER, &gh->gh_iflags);
				trace_gfs2_promote(gh, 1);
				gfs2_holder_wake(gh);
				goto restart;
			}
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			trace_gfs2_promote(gh, 0);
			gfs2_holder_wake(gh);
			continue;
		}
		if (gh->gh_list.prev == &gl->gl_holders)
			return 1;
		do_error(gl, 0);
		break;
	}
	return 0;
}

/**
 * find_first_waiter - find the first gh that's waiting for the glock
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put_nolock(gl);
	}
	if (held1 && held2 && list_empty(&gl->gl_holders))
		clear_bit(GLF_QUEUED, &gl->gl_flags);

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

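/* A demote request has been satisfied: clear it and wake wait_on_demote() */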
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

/**
 * finish_xmote - The DLM has replied to one of our lock requests
 * @gl: The glock
 * @ret: The status from the DLM
 *
 */

static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh;
	unsigned state = ret & LM_OUT_ST_MASK;
	int rv;

	spin_lock(&gl->gl_spin);
	trace_gfs2_glock_state_change(gl, state);
	state_change(gl, state);
	gh = find_first_waiter(gl);

	/* Demote to UN request arrived during demote to SH or DF */
	if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
	    state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
		gl->gl_target = LM_ST_UNLOCKED;

	/* Check for state != intended state */
	if (unlikely(state != gl->gl_target)) {
		if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
			/* move to back of queue and try next entry */
			if (ret & LM_OUT_CANCELED) {
				if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
					list_move_tail(&gh->gh_list, &gl->gl_holders);
				gh = find_first_waiter(gl);
				gl->gl_target = gh->gh_state;
				goto retry;
			}
			/* Some error or failed "try lock" - report it */
			if ((ret & LM_OUT_ERROR) ||
			    (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
				gl->gl_target = gl->gl_state;
				do_error(gl, ret);
				goto out;
			}
		}
		switch(state) {
		/* Unlocked due to conversion deadlock, try again */
		case LM_ST_UNLOCKED:
retry:
			do_xmote(gl, gh, gl->gl_target);
			break;
		/* Conversion fails, unlock and try again */
		case LM_ST_SHARED:
		case LM_ST_DEFERRED:
			do_xmote(gl, gh, LM_ST_UNLOCKED);
			break;
		default: /* Everything else */
			printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
			GLOCK_BUG_ON(gl, 1);
		}
		spin_unlock(&gl->gl_spin);
		return;
	}

	/* Fast path - we got what we asked for */
	if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
		gfs2_demote_wake(gl);
	if (state != LM_ST_UNLOCKED) {
		if (glops->go_xmote_bh) {
			spin_unlock(&gl->gl_spin);
			rv = glops->go_xmote_bh(gl, gh);
			spin_lock(&gl->gl_spin);
			if (rv) {
				do_error(gl, rv);
				goto out;
			}
		}
		rv = do_promote(gl);
		if (rv == 2)
			goto out_locked;
	}
out:
	clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
	spin_unlock(&gl->gl_spin);
}

/**
 * do_xmote - Calls the DLM to change the state of a lock
 * @gl: The lock state
 * @gh: The holder (only for promotes)
 * @target: The target lock state
 *
 */

static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int lck_flags = gh ? gh->gh_flags : 0;
	int ret;

	lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
		      LM_FLAG_PRIORITY);
	GLOCK_BUG_ON(gl, gl->gl_state == target);
	GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
	if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
	    glops->go_inval) {
		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
		do_error(gl, 0); /* Fail queued try locks */
	}
	gl->gl_req = target;
	spin_unlock(&gl->gl_spin);
	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);
	if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
	clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);

	gfs2_glock_hold(gl);
	if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
	    gl->gl_state == LM_ST_DEFERRED) &&
	    !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
		lck_flags |= LM_FLAG_TRY_1CB;

	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
		/* lock_dlm */
		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
		GLOCK_BUG_ON(gl, ret);
	} else { /* lock_nolock */
		finish_xmote(gl, target);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
	}

	spin_lock(&gl->gl_spin);
}

/**
 * find_first_holder - find the first "holder" gh
 * @gl: the glock
 */

static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;

	if (!list_empty(&gl->gl_holders)) {
		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
		if (test_bit(HIF_HOLDER, &gh->gh_iflags))
			return gh;
	}
	return NULL;
}

/**
 * run_queue - do all outstanding tasks related to a glock
 * @gl: The glock in question
 * @nonblock: True if we must not block in run_queue
 *
 */

static void run_queue(struct gfs2_glock *gl, const int nonblock)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_holder *gh = NULL;
	int ret;

	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		return;

	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));

	if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
	    gl->gl_demote_state != gl->gl_state) {
		if (find_first_holder(gl))
			goto out_unlock;
		if (nonblock)
			goto out_sched;
		set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
		GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
		gl->gl_target = gl->gl_demote_state;
	} else {
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			gfs2_demote_wake(gl);
		ret = do_promote(gl);
		if (ret == 0)
			goto out_unlock;
		if (ret == 2)
			goto out;
		gh = find_first_waiter(gl);
		gl->gl_target = gh->gh_state;
		if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
			do_error(gl, 0); /* Fail queued try locks */
	}
	do_xmote(gl, gh, gl->gl_target);
out:
	return;

out_sched:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	gfs2_glock_hold(gl);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
		gfs2_glock_put_nolock(gl);
	return;

out_unlock:
	clear_bit(GLF_LOCK, &gl->gl_flags);
	smp_mb__after_clear_bit();
	return;
}

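/*
 * Runs on gfs2_delete_workqueue: look up the (possibly unlinked) inode
 * behind this glock and prune its dcache aliases, so the final iput()
 * can evict it and let its blocks be deallocated.
 */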
static void delete_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_inode *ip;
	struct inode *inode;
	u64 no_addr = gl->gl_name.ln_number;

	ip = gl->gl_object;
	/* Note: Unsafe to dereference ip as we don't hold right refs/locks */

	if (ip)
		inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
	else
		inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
	if (inode && !IS_ERR(inode)) {
		d_prune_aliases(inode);
		iput(inode);
	}
	gfs2_glock_put(gl);
}

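/*
 * Main glock state machine worker: complete any pending reply from the
 * lock module, turn a pending demote into a real one once the minimum
 * hold time has expired, then run the holder queue.
 */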
static void glock_work_func(struct work_struct *work)
{
	unsigned long delay = 0;
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
	int drop_ref = 0;

	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
		finish_xmote(gl, gl->gl_reply);
		drop_ref = 1;
	}
	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    gl->gl_state != LM_ST_UNLOCKED &&
	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
		unsigned long holdtime, now = jiffies;
		holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
		if (time_before(now, holdtime))
			delay = holdtime - now;
		set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
	}
	run_queue(gl, 0);
	spin_unlock(&gl->gl_spin);
	if (!delay ||
	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
	if (drop_ref)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct super_block *s = sdp->sd_vfs;
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	struct address_space *mapping;
	struct kmem_cache *cachep;

	rcu_read_lock();
	gl = search_bucket(hash, sdp, &name);
	rcu_read_unlock();

	*glp = gl;
	if (gl)
		return 0;
	if (!create)
		return -ENOENT;

	if (glops->go_flags & GLOF_ASPACE)
		cachep = gfs2_glock_aspace_cachep;
	else
		cachep = gfs2_glock_cachep;
	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	atomic_inc(&sdp->sd_glock_disposal);
	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_target = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_ops = glops;
	snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
	gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
	INIT_WORK(&gl->gl_delete, delete_work_func);

	mapping = gfs2_glock2aspace(gl);
	if (mapping) {
		mapping->a_ops = &gfs2_meta_aops;
		mapping->host = s->s_bdev->bd_inode;
		mapping->flags = 0;
		mapping_set_gfp_mask(mapping, GFP_NOFS);
		mapping->assoc_mapping = NULL;
		mapping->backing_dev_info = s->s_bdi;
		mapping->writeback_index = 0;
	}

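	/*
	 * Another thread may have inserted the same glock while we were
	 * allocating; if so, use the existing one and free ours.
	 */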
	spin_lock_bucket(hash);
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		spin_unlock_bucket(hash);
		kmem_cache_free(cachep, gl);
		atomic_dec(&sdp->sd_glock_disposal);
		gl = tmp;
	} else {
		hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
		spin_unlock_bucket(hash);
	}

	*glp = gl;

	return 0;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = get_pid(task_pid(current));
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	if (gh->gh_owner_pid)
		put_pid(gh->gh_owner_pid);
	gh->gh_owner_pid = get_pid(task_pid(current));
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	put_pid(gh->gh_owner_pid);
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

/**
 * gfs2_glock_holder_wait
 * @word: unused
 *
 * This function and gfs2_glock_demote_wait both show up in the WCHAN
 * field. Thus I've separated these otherwise identical functions in
 * order to be more informative to the user.
 */

static int gfs2_glock_holder_wait(void *word)
{
	schedule();
	return 0;
}

static int gfs2_glock_demote_wait(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
}

/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
			gl->gl_demote_state != state) {
		gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl);
	trace_gfs2_demote_rq(gl);
}

/**
 * gfs2_glock_wait - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	wait_on_holder(gh);
	return gh->gh_error;
}

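/*
 * Print either into the debugfs seq_file (when dumping glocks) or, if
 * @seq is NULL, straight to the kernel log.
 */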
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	if (seq) {
		struct gfs2_glock_iter *gi = seq->private;
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_printf(seq, "%s", gi->string);
	} else {
		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR " %pV", &vaf);
	}

	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 * Eventually we should move the recursive locking trap to a
 * debugging option or something like that. This is the fast
 * path and needs to have the minimum number of distractions.
 *
 */

static inline void add_to_queue(struct gfs2_holder *gh)
__releases(&gl->gl_spin)
__acquires(&gl->gl_spin)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *insert_pt = NULL;
	struct gfs2_holder *gh2;
	int try_lock = 0;

	BUG_ON(gh->gh_owner_pid == NULL);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			try_lock = 1;
		if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
			goto fail;
	}

	list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
		if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
		    (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
			goto trap_recursive;
		if (try_lock &&
		    !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
		    !may_grant(gl, gh)) {
fail:
			gh->gh_error = GLR_TRYFAILED;
			gfs2_holder_wake(gh);
			return;
		}
		if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
			continue;
		if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
			insert_pt = &gh2->gh_list;
	}
	set_bit(GLF_QUEUED, &gl->gl_flags);
	trace_gfs2_glock_queue(gh, 1);
	if (likely(insert_pt == NULL)) {
		list_add_tail(&gh->gh_list, &gl->gl_holders);
		if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
			goto do_cancel;
		return;
	}
	list_add_tail(&gh->gh_list, insert_pt);
do_cancel:
	gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
	if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
		spin_unlock(&gl->gl_spin);
		if (sdp->sd_lockstruct.ls_ops->lm_cancel)
			sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
		spin_lock(&gl->gl_spin);
	}
	return;

trap_recursive:
	print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
	print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
	printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
	printk(KERN_ERR "lock type: %d req lock state : %d\n",
	       gh->gh_gl->gl_name.ln_type, gh->gh_state);
	__dump_glock(NULL, gl);
	BUG();
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
	    test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
	run_queue(gl, 1);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC))
		error = gfs2_glock_wait(gh);

	return error;
}
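
/*
 * Typical calling pattern (illustrative sketch only; error handling is
 * trimmed and "ip" stands for whatever struct gfs2_inode the caller owns):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... operate on the object protected by the glock ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */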

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;
	int fast_path = 0;

	spin_lock(&gl->gl_spin);
	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0);

	list_del_init(&gh->gh_list);
	if (find_first_holder(gl) == NULL) {
		if (glops->go_unlock) {
			GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
			clear_bit(GLF_LOCK, &gl->gl_flags);
		}