/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if anytime in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitops.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>
#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif
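
/*
 * Note: prove_locking and lock_stat above are plain module parameters,
 * so (assuming the usual module_param() behaviour for built-in code)
 * they can be set at boot time, e.g. "lockdep.prove_locking=0", or
 * toggled at runtime via /sys/module/lockdep/parameters/.
 */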

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* prevent any recursions within lockdep from causing deadlocks */
	current->lockdep_recursion++;
	return 1;
}

static inline int graph_unlock(void)
{
	if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
		return DEBUG_LOCKS_WARN_ON(1);

	current->lockdep_recursion--;
	arch_spin_unlock(&lockdep_lock);
	return 0;
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	arch_spin_unlock(&lockdep_lock);

	return ret;
}

static int lockdep_initialized;

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * All data structures here are protected by the global debug_lock.
 *
 * Mutex key structs only get allocated, once during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
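
/*
 * hlock->class_idx is stored 1-based, so that 0 can mean "no class set";
 * hence the !class_idx sanity check and the -1 bias in hlock_class().
 */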

static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	if (!hlock->class_idx) {
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
		      cpu_lock_stats);
static inline u64 lockstat_clock(void)
{
	return local_clock();
}

static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}

static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}

struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}

static void put_lock_stats(struct lock_class_stats *stats)
{
	put_cpu_var(cpu_lock_stats);
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;
	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
	put_lock_stats(stats);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock lock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct list_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct list_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
	(key2))
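
/*
 * Rough sketch of how this is used (in __lock_acquire(), further down
 * in this file): as each lock is taken, its class index is folded into
 * the running key:
 *
 *	chain_key = iterate_chain_key(chain_key, class_idx);
 *
 * so each prefix of the held-lock stack yields a distinct chain key.
 */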

void lockdep_off(void)
{
	current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);
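
/*
 * Typical usage: code whose locking lockdep must not track brackets
 * itself in an off/on pair:
 *
 *	lockdep_off();
 *	... locking invisible to lockdep ...
 *	lockdep_on();
 *
 * Since lockdep_recursion is a counter, such sections nest.
 */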

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
# define RECLAIM_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
# define RECLAIM_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. 1 would be to allow everything else */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 0;
	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->entries = stack_trace + nr_stack_trace_entries;

	trace->skip = 3;

	save_stack_trace(trace);

	/*
	 * Some daft arches put -1 at the end to indicate it's a full trace.
	 *
	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
	 * complete trace that maxes out the entries provided will be reported
	 * as incomplete, friggin useless </rant>
	 */
	if (trace->nr_entries != 0 &&
	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
		trace->nr_entries--;

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return 0;
	}

	return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * We cannot printk in early bootup code. Not even early_printk()
 * might work. So we mark any initialization errors and printk
 * about it later on, in lockdep_info().
 */
static int lockdep_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
	.entries = lockdep_init_trace_data,
};

/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}
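
/*
 * Usage characters emitted below (cf. Documentation/lockdep-design.txt):
 *
 *   '.'  no IN-irq use and never acquired with irqs enabled
 *   '+'  acquired with irqs enabled (LOCK_ENABLED_*)
 *   '-'  acquired in irq context (LOCK_USED_IN_*)
 *   '?'  both of the above
 */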

static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	char c = '.';

	if (class->usage_mask & lock_flag(bit + 2))
		c = '+';
	if (class->usage_mask & lock_flag(bit)) {
		c = '-';
		if (class->usage_mask & lock_flag(bit + 2))
			c = '?';
	}

	return c;
}

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE) 						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}

static int __print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN];
	const char *name;

	name = class->name;
	if (!name)
		name = __get_key_name(class->key, str);

	return printk("%s", name);
}

static void print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
	const char *name;

	get_usage_chars(class, usage);

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(" (%s", name);
	} else {
		printk(" (%s", name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		if (class->subclass)
			printk("/%d", class->subclass);
	}
	printk("){%s}", usage);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk("%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	print_lock_name(hlock_class(hlock));
	printk(", at: ");
	print_ip_sym(hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *curr)
{
	int i, depth = curr->lockdep_depth;

	if (!depth) {
		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
		return;
	}
	printk("%d lock%s held by %s/%d:\n",
		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));

	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(curr->held_locks + i);
	}
}

static void print_kernel_version(void)
{
	printk("%s %.*s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
static int static_obj(void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

	/*
	 * in-kernel percpu var?
	 */
	if (is_kernel_percpu_address(addr))
		return 1;

	/*
	 * module static or percpu var?
	 */
	return is_module_address(addr) || is_module_percpu_address(addr);
}

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;

#ifdef CONFIG_DEBUG_LOCKDEP
	/*
	 * If the architecture calls into lockdep before initializing
	 * the hashes then we'll warn about it later. (we cannot printk
	 * right now)
	 */
	if (unlikely(!lockdep_initialized)) {
		lockdep_init();
		lockdep_init_error = 1;
		save_stack_trace(&lockdep_init_trace);
	}
#endif

	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
		debug_locks_off();
		printk(KERN_ERR
			"BUG: looking up invalid subclass: %u\n", subclass);
		printk(KERN_ERR
			"turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}

	/*
	 * Static locks do not have their class-keys yet - for them the key
	 * is the lock object itself:
	 */
	if (unlikely(!lock->key))
		lock->key = (void *)lock;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);
	/*
	 * We can walk the hash lockfree, because the hash only
	 * grows, and we are careful when adding entries to the end:
	 */
	list_for_each_entry(class, hash_head, hash_entry) {
		if (class->key == key) {
			WARN_ON_ONCE(class->name != lock->name);
			return class;
		}
	}

	return NULL;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;
	unsigned long flags;

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		return class;

	/*
	 * Debug-check: all keys must be persistent!
 	 */
	if (!static_obj(lock->key)) {
		debug_locks_off();
		printk("INFO: trying to register non-static key.\n");
		printk("the code is fine but needs lockdep annotation.\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	raw_local_irq_save(flags);
	if (!graph_lock()) {
		raw_local_irq_restore(flags);
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	list_for_each_entry(class, hash_head, hash_entry)
		if (class->key == key)
			goto out_unlock_set;
	/*
	 * Allocate a new key from the static array, and add it to
	 * the hash:
	 */
	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		if (!debug_locks_off_graph_unlock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
		raw_local_irq_restore(flags);

		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}
	class = lock_classes + nr_lock_classes++;
	debug_atomic_inc(nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	INIT_LIST_HEAD(&class->lock_entry);
	INIT_LIST_HEAD(&class->locks_before);
	INIT_LIST_HEAD(&class->locks_after);
	class->name_version = count_matching_names(class);
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	list_add_tail_rcu(&class->hash_entry, hash_head);
	/*
	 * Add it to the global list of classes:
	 */
	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

	if (verbose(class)) {
		graph_unlock();
		raw_local_irq_restore(flags);

		printk("\nnew class %p: %s", class->key, class->name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		printk("\n");
		dump_stack();

		raw_local_irq_save(flags);
		if (!graph_lock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();
	raw_local_irq_restore(flags);

	if (!subclass || force)
		lock->class_cache[0] = class;
	else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
		lock->class_cache[subclass] = class;

	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}
	return list_entries + nr_list_entries++;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
			    struct list_head *head, unsigned long ip,
			    int distance, struct stack_trace *trace)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	entry->class = this;
	entry->distance = distance;
	entry->trace = *trace;
	/*
	 * Since we never remove from the dependency list, the list can
	 * be walked lockless by other CPUs, it's only allocation
	 * that must be protected by the spinlock. But this also means
	 * we must make new entries visible only once writes to the
	 * entry become visible - hence the RCU op:
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}

/*
 * For good efficiency of the modulo operation, we use a power of 2
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build
 * the shortest path from the next lock to be acquired to the
 * previous held lock, if there is a circular dependency between them.
 */
struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int  front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;
static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return 0;
}

static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}
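
/*
 * Rough sketch of how these helpers drive the BFS in __bfs(), below:
 *
 *	__cq_init(cq);
 *	__cq_enqueue(cq, (unsigned long)source_entry);
 *	while (!__cq_empty(cq)) {
 *		__cq_dequeue(cq, (unsigned long *)&lock);
 *		... enqueue each not-yet-visited dependency of @lock ...
 *	}
 *
 * front/rear wrap via CQ_MASK, so the element array is a ring buffer
 * holding at most MAX_CIRCULAR_QUEUE_SIZE-1 entries at a time.
 */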

static inline void mark_lock_accessed(struct lock_list *lock,
					struct lock_list *parent)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	lock->parent = parent;
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}
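
/*
 * The ->parent pointers set by mark_lock_accessed() chain every visited
 * entry back to the BFS source; get_lock_parent()/get_lock_depth() walk
 * that chain, which is also how a dependency path is reconstructed when
 * a circle is found and has to be reported.
 */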

static int __bfs(struct lock_list *source_entry,
		 void *data,
		 int (*match)(struct lock_list *entry, void *data),
		 struct lock_list **target_entry,
		 int forward)
{
	struct lock_list *entry;
	struct list_head *head;
	struct circular_queue *cq = &lock_cq;
	int ret = 1;

	if (match(source_entry, data)) {
		*target_entry = source_entry;
		ret = 0;
		goto exit;
	}

	if (forward)
		head = &source_entry->class->locks_after;
	else
		head = &source_entry->class->locks_before;

	if (list_empty(head))
		goto exit;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)source_entry);

	while (!__cq_empty(cq)) {
		struct lock_list *lock;

		__cq_dequeue(cq, (unsigned long *)&lock);

		if (!lock->class) {
			ret = -2;
			goto exit;
		}

		if (forward)
			head = &lock->class->locks_after;
		else