#define STRICT_READ_CHECKS	1

static const char *state_names[] = {
#define LOCKDEP_STATE(__STATE) \
	STR(__STATE),
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline const char *state_name(enum lock_usage_bit bit)
{
	return state_names[bit >> 2];
}

static const char *state_rnames[] = {
#define LOCKDEP_STATE(__STATE) \
	STR(__STATE)"-READ",
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline const char *state_rname(enum lock_usage_bit bit)
{
	return state_rnames[bit >> 2];
}

static int (*state_verbose_f[])(struct lock_class *class) = {
#define LOCKDEP_STATE(__STATE) \
	__STATE##_verbose,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline int state_verbose(enum lock_usage_bit bit,
				struct lock_class *class)
{
	return state_verbose_f[bit >> 2](class);
}

static int exclusive_bit(int new_bit)
{
	/*
	 * USED_IN
	 * USED_IN_READ
	 * ENABLED
	 * ENABLED_READ
	 *
	 * bit 0 - write/read
	 * bit 1 - used_in/enabled
	 * bit 2+  state
	 */

	int state = new_bit & ~3;
	int dir = new_bit & 2;

	return state | (dir ^ 2);
}
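
/*
 * Worked example (a sketch, assuming HARDIRQ is the first state in
 * lockdep_states.h, so its four usage bits are 0..3): exclusive_bit()
 * keeps the state bits, flips the used_in/enabled bit and clears the
 * read bit, hence:
 *
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ) == LOCK_ENABLED_HARDIRQ
 *	exclusive_bit(LOCK_ENABLED_HARDIRQ) == LOCK_USED_IN_HARDIRQ
 *
 * and excl_bit + 1 is then the corresponding _READ variant.
 */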

typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
			     enum lock_usage_bit bit, const char *name);

static int
mark_lock_irq_write(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	const char *name = state_name(new_bit);
	const char *rname = state_rname(new_bit);

	int excl_bit = exclusive_bit(new_bit);
	int dir = new_bit & 2;

	check_usage_f usage = dir ?
		check_usage_backwards : check_usage_forwards;

	if (!valid_state(curr, this, new_bit, excl_bit))
		return 0;
	if (!valid_state(curr, this, new_bit, excl_bit + 1))
		return 0;
	/*
	 * just marked it hardirq-safe, check that this lock
	 * took no hardirq-unsafe lock in the past:
	 */
	if (!usage(curr, this, excl_bit, name))
		return 0;
#if STRICT_READ_CHECKS
	/*
	 * just marked it hardirq-safe, check that this lock
	 * took no hardirq-unsafe-read lock in the past:
	 */
	if (!usage(curr, this, excl_bit + 1, rname))
		return 0;
#endif
	if (state_verbose(new_bit, hlock_class(this)))
		return 2;

	return 1;
}

static int
mark_lock_irq_read(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	const char *name = state_name(new_bit);
	const char *rname = state_rname(new_bit);

	int excl_bit = exclusive_bit(new_bit);
	int dir = new_bit & 2;

	if (!valid_state(curr, this, new_bit, excl_bit))
		return 0;

	if (!dir) {
		/*
		 * just marked it hardirq-read-safe, check that this lock
		 * took no hardirq-unsafe lock in the past:
		 */
		if (!check_usage_forwards(curr, this, excl_bit, name))
			return 0;
	} else if (STRICT_READ_CHECKS) {
		/*
		 * just marked it hardirq-read-unsafe, check that no
		 * hardirq-safe lock in the system ever took it in the past:
		 */
		if (!check_usage_backwards(curr, this, excl_bit, name))
			return 0;
	}

	if (state_verbose(new_bit, hlock_class(this)))
		return 2;

	return 1;
}

static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	int ret = 1;

	switch (new_bit) {
	case LOCK_USED_IN_HARDIRQ:
	case LOCK_USED_IN_SOFTIRQ:
	case LOCK_USED_IN_RECLAIM_FS:
	case LOCK_ENABLED_HARDIRQ:
	case LOCK_ENABLED_SOFTIRQ:
	case LOCK_ENABLED_RECLAIM_FS:
		return mark_lock_irq_write(curr, this, new_bit);
	case LOCK_USED_IN_HARDIRQ_READ:
	case LOCK_USED_IN_SOFTIRQ_READ:
	case LOCK_USED_IN_RECLAIM_FS_READ:
	case LOCK_ENABLED_HARDIRQ_READ:
	case LOCK_ENABLED_SOFTIRQ_READ:
	case LOCK_ENABLED_RECLAIM_FS_READ:
		return mark_lock_irq_read(curr, this, new_bit);
	default:
		WARN_ON(1);
		break;
	}

	return ret;
}

enum mark_type {
#define LOCKDEP_STATE(__STATE)	__STATE,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

#define MARK_HELD_CASE(__STATE)						\
	case __STATE:							\
		if (hlock->read)					\
			usage_bit = LOCK_ENABLED_##__STATE##_READ;	\
		else							\
			usage_bit = LOCK_ENABLED_##__STATE;		\
		break;
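
/*
 * For illustration, MARK_HELD_CASE(HARDIRQ) expands to:
 *
 *	case HARDIRQ:
 *		if (hlock->read)
 *			usage_bit = LOCK_ENABLED_HARDIRQ_READ;
 *		else
 *			usage_bit = LOCK_ENABLED_HARDIRQ;
 *		break;
 *
 * giving the switch in mark_held_locks() one case per lockdep state.
 */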

/*
 * Mark all held locks with a usage bit:
 */
static int
mark_held_locks(struct task_struct *curr, enum mark_type mark)
{
	enum lock_usage_bit usage_bit;
	struct held_lock *hlock;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		switch (mark) {
#define LOCKDEP_STATE(__STATE) MARK_HELD_CASE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
		default:
			WARN_ON(1);
			return 0;
		}
		if (!mark_lock(curr, hlock, usage_bit))
			return 0;
	}

	return 1;
}

/*
 * Debugging helper: via this flag we know that we are in
 * 'early bootup code', and will warn about any invalid irqs-on event:
 */
static int early_boot_irqs_enabled;

void early_boot_irqs_off(void)
{
	early_boot_irqs_enabled = 0;
}

void early_boot_irqs_on(void)
{
	early_boot_irqs_enabled = 1;
}
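
/*
 * Intended usage (a sketch, not verified against the init code): the
 * early boot path calls early_boot_irqs_off() before its first
 * critical section and early_boot_irqs_on() just before the first
 * local_irq_enable(), so a premature irqs-on event in between trips
 * the DEBUG_LOCKS_WARN_ON() in trace_hardirqs_on_caller() below.
 */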

/*
 * Hardirqs will be enabled:
 */
void trace_hardirqs_on_caller(unsigned long ip)
{
	struct task_struct *curr = current;

	time_hardirqs_on(CALLER_ADDR0, ip);
	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
		return;

	if (unlikely(curr->hardirqs_enabled)) {
		debug_atomic_inc(&redundant_hardirqs_on);
		return;
	}
	/* we'll do an OFF -> ON transition: */
	curr->hardirqs_enabled = 1;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;
	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
		return;
	/*
	 * We are going to turn hardirqs on, so set the
	 * usage bit for all held locks:
	 */
	if (!mark_held_locks(curr, HARDIRQ))
		return;
	/*
	 * If we have softirqs enabled, then set the usage
	 * bit for all held locks. (disabled hardirqs prevented
	 * this bit from being set before)
	 */
	if (curr->softirqs_enabled)
		if (!mark_held_locks(curr, SOFTIRQ))
			return;

	curr->hardirq_enable_ip = ip;
	curr->hardirq_enable_event = ++curr->irq_events;
	debug_atomic_inc(&hardirqs_on_events);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_on(void)
{
	trace_hardirqs_on_caller(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
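
/*
 * Illustrative caller (a sketch of the CONFIG_TRACE_IRQFLAGS wiring in
 * <linux/irqflags.h>): the irq-enable macros pair the raw arch
 * operation with the tracing hook, roughly:
 *
 *	#define local_irq_enable() \
 *		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
 */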

/*
 * Hardirqs were disabled:
 */
void trace_hardirqs_off_caller(unsigned long ip)
{
	struct task_struct *curr = current;

	time_hardirqs_off(CALLER_ADDR0, ip);
	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->hardirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->hardirqs_enabled = 0;
		curr->hardirq_disable_ip = ip;
		curr->hardirq_disable_event = ++curr->irq_events;
		debug_atomic_inc(&hardirqs_off_events);
	} else
		debug_atomic_inc(&redundant_hardirqs_off);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

void trace_hardirqs_off(void)
{
	trace_hardirqs_off_caller(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off);

/*
 * Softirqs will be enabled:
 */
void trace_softirqs_on(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks))
		return;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->softirqs_enabled) {
		debug_atomic_inc(&redundant_softirqs_on);
		return;
	}

	/*
	 * We'll do an OFF -> ON transition:
	 */
	curr->softirqs_enabled = 1;
	curr->softirq_enable_ip = ip;
	curr->softirq_enable_event = ++curr->irq_events;
	debug_atomic_inc(&softirqs_on_events);
	/*
	 * We are going to turn softirqs on, so set the
	 * usage bit for all held locks, if hardirqs are
	 * enabled too:
	 */
	if (curr->hardirqs_enabled)
		mark_held_locks(curr, SOFTIRQ);
}

/*
 * Softirqs were disabled:
 */
void trace_softirqs_off(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks))
		return;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->softirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->softirqs_enabled = 0;
		curr->softirq_disable_ip = ip;
		curr->softirq_disable_event = ++curr->irq_events;
		debug_atomic_inc(&softirqs_off_events);
		DEBUG_LOCKS_WARN_ON(!softirq_count());
	} else
		debug_atomic_inc(&redundant_softirqs_off);
}

void lockdep_trace_alloc(gfp_t gfp_mask)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks))
		return;

	/* no reclaim without waiting on it */
	if (!(gfp_mask & __GFP_WAIT))
		return;

	/* this guy won't enter reclaim */
	if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
		return;

	/* We're only interested in __GFP_FS allocations for now */
	if (!(gfp_mask & __GFP_FS))
		return;

	if (DEBUG_LOCKS_WARN_ON(irqs_disabled()))
		return;

	mark_held_locks(curr, RECLAIM_FS);
}
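
/*
 * Illustrative caller (a sketch; the real call sites live in the slab
 * allocators): every allocation path is expected to report its gfp
 * mask here, e.g. via a hypothetical helper:
 *
 *	static inline void alloc_pre_hook(gfp_t flags)
 *	{
 *		lockdep_trace_alloc(flags);
 *	}
 */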

static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
{
	/*
	 * If non-trylock use in a hardirq or softirq context, then
	 * mark the lock as used in these contexts:
	 */
	if (!hlock->trylock) {
		if (hlock->read) {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_HARDIRQ_READ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_SOFTIRQ_READ))
					return 0;
		} else {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
					return 0;
		}
	}
	if (!hlock->hardirqs_off) {
		if (hlock->read) {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ_READ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ_READ))
					return 0;
		} else {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ))
					return 0;
		}
	}

	/*
	 * We reuse the irq context infrastructure more broadly as a general
	 * context checking code. This tests GFP_FS recursion (a lock taken
	 * during reclaim for a GFP_FS allocation is held over a GFP_FS
	 * allocation).
	 */
	if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
		if (hlock->read) {
			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
					return 0;
		} else {
			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
					return 0;
		}
	}

	return 1;
}
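
/*
 * Summary of the rules above: a non-trylock acquisition in hardirq or
 * softirq context sets LOCK_USED_IN_HARDIRQ/SOFTIRQ(_READ); acquiring
 * with hardirqs enabled sets LOCK_ENABLED_HARDIRQ(_READ) and, if
 * softirqs are enabled too, LOCK_ENABLED_SOFTIRQ(_READ); a non-trylock
 * acquisition while in reclaim for a __GFP_FS allocation sets
 * LOCK_USED_IN_RECLAIM_FS(_READ).
 */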

static int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	unsigned int depth = curr->lockdep_depth;

	/*
	 * Keep track of points where we cross into an interrupt context:
	 */
	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
				curr->softirq_context;
	if (depth) {
		struct held_lock *prev_hlock;

		prev_hlock = curr->held_locks + depth-1;
		/*
		 * If we cross into another context, reset the
		 * hash key (this also prevents the checking and the
		 * adding of the dependency to 'prev'):
		 */
		if (prev_hlock->irq_context != hlock->irq_context)
			return 1;
	}
	return 0;
}

#else

static inline
int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	WARN_ON(1);
	return 1;
}
static inline int mark_irqflags(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 1;
}
static inline int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 0;
}

#endif

/*
 * Mark a lock with a usage bit, and validate the state transition:
 */
static int mark_lock(struct task_struct *curr, struct held_lock *this,
			     enum lock_usage_bit new_bit)
{
	unsigned int new_mask = 1 << new_bit, ret = 1;

	/*
	 * If already set then do not dirty the cacheline,
	 * nor do any checks:
	 */
	if (likely(hlock_class(this)->usage_mask & new_mask))
		return 1;

	if (!graph_lock())
		return 0;
	/*
	 * Make sure we didn't race:
	 */
	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
		graph_unlock();
		return 1;
	}
	hlock_class(this)->usage_mask |= new_mask;
	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
		return 0;
	switch (new_bit) {
#define LOCKDEP_STATE(__STATE)			\
	case LOCK_USED_IN_##__STATE:		\
	case LOCK_USED_IN_##__STATE##_READ:	\
	case LOCK_ENABLED_##__STATE:		\
	case LOCK_ENABLED_##__STATE##_READ:
#include "lockdep_states.h"
#undef LOCKDEP_STATE
		ret = mark_lock_irq(curr, this, new_bit);
		if (!ret)
			return 0;
		break;
	case LOCK_USED:
		debug_atomic_dec(&nr_unused_locks);
		break;
	default:
		if (!debug_locks_off_graph_unlock())
			return 0;
		WARN_ON(1);
		return 0;
	}
	graph_unlock();

	/*
	 * We must printk outside of the graph_lock:
	 */
	if (ret == 2) {
		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
		print_lock(this);
		print_irqtrace_events(curr);
		dump_stack();
	}

	return ret;
}

/*
 * Initialize a lock instance's lock-class mapping info:
 */
void lockdep_init_map(struct lockdep_map *lock, const char *name,
		      struct lock_class_key *key, int subclass)
{
	if (unlikely(!debug_locks))
		return;

	if (DEBUG_LOCKS_WARN_ON(!key))
		return;
	if (DEBUG_LOCKS_WARN_ON(!name))
		return;
	/*
	 * Sanity check, the lock-class key must be persistent:
	 */
	if (!static_obj(key)) {
		printk("BUG: key %p not in .data!\n", key);
		DEBUG_LOCKS_WARN_ON(1);
		return;
	}
	lock->name = name;
	lock->key = key;
	lock->class_cache = NULL;
#ifdef CONFIG_LOCK_STAT
	lock->cpu = raw_smp_processor_id();
#endif
	if (subclass)
		register_lock_class(lock, subclass, 1);
}
EXPORT_SYMBOL_GPL(lockdep_init_map);
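
/*
 * Typical use (a sketch along the lines of lib/spinlock_debug.c): the
 * debug lock initializers pass a static key, so all locks initialized
 * at one code location share a single lock class:
 *
 *	void __spin_lock_init(spinlock_t *lock, const char *name,
 *			      struct lock_class_key *key)
 *	{
 *		lockdep_init_map(&lock->dep_map, name, key, 0);
 *		...
 *	}
 */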

/*
 * This gets called for every mutex_lock*()/spin_lock*() operation.
 * We maintain the dependency maps and validate the locking attempt:
 */
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			  int trylock, int read, int check, int hardirqs_off,
			  struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct lock_class *class = NULL;
	struct held_lock *hlock;
	unsigned int depth, id;
	int chain_head = 0;
	u64 chain_key;

	if (!prove_locking)
		check = 1;

	if (unlikely(!debug_locks))
		return 0;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
		printk("turning off the locking correctness validator.\n");
		return 0;
	}

	if (!subclass)
		class = lock->class_cache;
	/*
	 * Not cached yet or subclass?
	 */
	if (unlikely(!class)) {
		class = register_lock_class(lock, subclass, 0);
		if (!class)
			return 0;
	}
	debug_atomic_inc((atomic_t *)&class->ops);
	if (very_verbose(class)) {
		printk("\nacquire class [%p] %s", class->key, class->name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		printk("\n");
		dump_stack();
	}

	/*
	 * Add the lock to the list of currently held locks.
	 * (we don't increase the depth just yet, up until the
	 * dependency checks are done)
	 */
	depth = curr->lockdep_depth;
	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
		return 0;

	hlock = curr->held_locks + depth;
	if (DEBUG_LOCKS_WARN_ON(!class))
		return 0;
	hlock->class_idx = class - lock_classes + 1;
	hlock->acquire_ip = ip;
	hlock->instance = lock;
	hlock->nest_lock = nest_lock;
	hlock->trylock = trylock;
	hlock->read = read;
	hlock->check = check;
	hlock->hardirqs_off = !!hardirqs_off;
#ifdef CONFIG_LOCK_STAT
	hlock->waittime_stamp = 0;
	hlock->holdtime_stamp = sched_clock();
#endif
	if (check == 2 && !mark_irqflags(curr, hlock))
		return 0;

	/* mark it as used: */
	if (!mark_lock(curr, hlock, LOCK_USED))
		return 0;
	/*
	 * Calculate the chain hash: it's the combined hash of all the
	 * lock keys along the dependency chain. We save the hash value
	 * at every step so that we can get the current hash easily
	 * after unlock. The chain hash is then used to cache dependency
	 * results.
	 *
	 * The 'key ID' is the most compact key value to drive
	 * the hash, not class->key.
	 */
	id = class - lock_classes;
	if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
		return 0;

	chain_key = curr->curr_chain_key;
	if (!depth) {
		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
			return 0;
		chain_head = 1;
	}

	hlock->prev_chain_key = chain_key;
	if (separate_irq_context(curr, hlock)) {
		chain_key = 0;
		chain_head = 1;
	}
	chain_key = iterate_chain_key(chain_key, id);

	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
		return 0;
	curr->curr_chain_key = chain_key;
	curr->lockdep_depth++;
	check_chain_key(curr);
#ifdef CONFIG_DEBUG_LOCKDEP
	if (unlikely(!debug_locks))
		return 0;
#endif
	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
		debug_locks_off();
		printk("BUG: MAX_LOCK_DEPTH too low!\n");
		printk("turning off the locking correctness validator.\n");
		return 0;
	}
	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
		max_lockdep_depth = curr->lockdep_depth;

	return 1;
}
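
/*
 * Chain-key example: holding A and then acquiring B yields
 *
 *	chain_key = iterate_chain_key(iterate_chain_key(0, id_A), id_B)
 *
 * (id_A/id_B being the class ids), so the same stack of lock classes
 * always hashes to the same value, letting validate_chain() recognize
 * dependency chains it has already verified.
 */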

static int
print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
			   unsigned long ip)
{
	if (!debug_locks_off())
		return 0;
	if (debug_locks_silent)
		return 0;

	printk("\n=====================================\n");
	printk(  "[ BUG: bad unlock balance detected! ]\n");
	printk(  "-------------------------------------\n");
	printk("%s/%d is trying to release lock (",
		curr->comm, task_pid_nr(curr));
	print_lockdep_cache(lock);
	printk(") at:\n");
	print_ip_sym(ip);
	printk("but there are no more locks to release!\n");
	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Common debugging checks for both nested and non-nested unlock:
 */
static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
			unsigned long ip)
{
	if (unlikely(!debug_locks))
		return 0;
	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;

	if (curr->lockdep_depth <= 0)
		return print_unlock_inbalance_bug(curr, lock, ip);

	return 1;
}

static int
__lock_set_class(struct lockdep_map *lock, const char *name,
		 struct lock_class_key *key, unsigned int subclass,
		 unsigned long ip)
{
	struct task_struct *curr = current;
	struct held_lock *hlock, *prev_hlock;
	struct lock_class *class;
	unsigned int depth;
	int i;

	depth = curr->lockdep_depth;
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return 0;

	prev_hlock = NULL;
	for (i = depth-1; i >= 0; i--) {
		hlock = curr->held_locks + i;
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
			break;
		if (hlock->instance == lock)
			goto found_it;
		prev_hlock = hlock;
	}
	return print_unlock_inbalance_bug(curr, lock, ip);

found_it:
	lockdep_init_map(lock, name, key, 0);
	class = register_lock_class(lock, subclass, 0);
	hlock->class_idx = class - lock_classes + 1;

	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	for (; i < depth; i++) {
		hlock = curr->held_locks + i;
		if (!__lock_acquire(hlock->instance,
			hlock_class(hlock)->subclass, hlock->trylock,
				hlock->read, hlock->check, hlock->hardirqs_off,
				hlock->nest_lock, hlock->acquire_ip))
			return 0;
	}

	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
		return 0;
	return 1;
}
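
/*
 * Illustrative caller of the lock_set_class()/lock_set_subclass()
 * wrappers built on this (a sketch from the scheduler of this era):
 *
 *	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 */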

/*
 * Remove the lock from the list of currently held locks in a
 * potentially non-nested (out of order) manner. This is a
 * relatively rare operation, as all the unlock APIs default
 * to nested mode (which uses lock_release()):
 */
static int
lock_release_non_nested(struct task_struct *curr,
			struct lockdep_map *lock, unsigned long ip)
{
	struct held_lock *hlock, *prev_hlock;
	unsigned int depth;
	int i;

	/*
	 * Check whether the lock exists in the current stack
	 * of held locks:
	 */
	depth = curr->lockdep_depth;
	if (DEBUG_LOCKS_WARN_ON(!depth))
		return 0;

	prev_hlock = NULL;
	for (i = depth-1; i >= 0; i--) {
		hlock = curr->held_locks + i;
		/*
		 * We must not cross into another context:
		 */
		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
			break;
		if (hlock->instance == lock)
			goto found_it;
		prev_hlock = hlock;
	}
	return print_unlock_inbalance_bug(curr, lock, ip);

found_it:
	lock_release_holdtime(hlock);

	/*
	 * We have the right lock to unlock, 'hlock' points to it.
	 * Now we remove it from the stack, and add back the other
	 * entries (if any), recalculating the hash along the way:
	 */
	curr->lockdep_depth = i;
	curr->curr_chain_key = hlock->prev_chain_key;

	for (i++; i < depth; i++) {
		hlock = curr->held_locks + i;
		if (!__lock_acquire(hlock->instance,
			hlock_class(hlock)->subclass, hlock->trylock,
				hlock->read, hlock->check, hlock->hardirqs_off,
				hlock->nest_lock, hlock->acquire_ip))
			return 0;
	}

	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
		return 0;
	return 1;
}

/*
 * Remove the lock from the list of currently held locks - this gets
 * called on mutex_unlock()/spin_unlock*() (or on a failed
 * mutex_lock_interruptible()). This is done for unlocks that nest
 * perfectly. (i.e. the current top of the lock-stack is unlocked)
 */
static int lock_release_nested(struct task_struct *curr,
			       struct lockdep_map *lock, unsigned long ip)
{
	struct held_lock *hlock;
	unsigned int depth;

	/*
	 * Pop off the top of the lock stack:
	 */
	depth = curr->lockdep_depth - 1;
	hlock = curr->held_locks + depth;

	/*
	 * Is the unlock non-nested:
	 */
	if (hlock->instance != lock)
		return lock_release_non_nested(curr, lock, ip);
	curr->lockdep_depth--;

	if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
		return 0;

	curr->curr_chain_key = hlock->prev_chain_key;

	lock_release_holdtime(hlock);

#ifdef CONFIG_DEBUG_LOCKDEP
	hlock->prev_chain_key = 0;
	hlock->class_idx = 0;
	hlock->acquire_ip = 0;
	hlock->irq_context = 0;
#endif
	return 1;
}

/*
 * Remove the lock from the list of currently held locks - this gets
 * called on mutex_unlock()/spin_unlock*() (or on a failed
 * mutex_lock_interruptible()). This is done for unlocks that nest
 * perfectly. (i.e. the current top of the lock-stack is unlocked)
 */
static void
__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
	struct task_struct *curr = current;

	if (!check_unlock(curr, lock, ip))
		return;

	if (nested) {
		if (!lock_release_nested(curr, lock, ip))
			return;
	} else {
		if (!lock_release_non_nested(curr, lock, ip))
			return;
	}

	check_chain_key(curr);
}

/*
 * Check whether we follow the irq-flags state precisely:
 */
static void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
    defined(CONFIG_TRACE_IRQFLAGS)
	if (!debug_locks)
		return;

	if (irqs_disabled_flags(flags)) {
		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
			printk("possible reason: unannotated irqs-off.\n");
		}
	} else {
		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
			printk("possible reason: unannotated irqs-on.\n");
		}
	}

	/*
	 * We don't accurately track softirq state in e.g.
	 * hardirq contexts (such as on 4KSTACKS), so only
	 * check if not in hardirq contexts:
	 */
	if (!hardirq_count()) {
		if (softirq_count())
			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
		else
			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
	}

	if (!debug_locks)
		print_irqtrace_events(current);
#endif
}

void lock_set_class(struct lockdep_map *lock, const char *name,