/*
* kernel/lockdep.c
*
* Runtime locking correctness validator
*
* Started by Ingo Molnar:
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
* this code maps all the lock dependencies as they occur in a live kernel
* and will warn about the following classes of locking bugs:
*
* - lock inversion scenarios
* - circular lock dependencies
* - hardirq/softirq safe/unsafe locking bugs
*
* Bugs are reported even if the current locking scenario does not cause
* any deadlock at this point.
*
* I.e. if anytime in the past two locks were taken in a different order,
* even if it happened for another task, even if those were different
* locks (but of the same class as this lock), this code will detect it.
*
* Thanks to Arjan van de Ven for coming up with the initial idea of
* mapping lock dependencies at runtime.
*/
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/stringify.h>
#include <asm/sections.h>
#include "lockdep_internals.h"
#include <trace/events/lockdep.h>
#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif
#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif
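/*
 * Runtime switches: with CONFIG_PROVE_LOCKING / CONFIG_LOCK_STAT enabled,
 * prove_locking and lock_stat are writable module parameters (mode 0644),
 * so the checks can be toggled at runtime through the module parameter
 * interface; without the config options they compile down to the constant
 * 0 and the dependent code can be optimized away.
 */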
/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 * class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really dont want the spinlock
 * code to recurse back into the lockdep code...
 */
static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
static int graph_lock(void)
{
__raw_spin_lock(&lockdep_lock);
/*
* Make sure that if another CPU detected a bug while
* walking the graph we dont change it (while the other
* CPU is busy printing out stuff with the graph lock
* dropped already)
*/
if (!debug_locks) {
__raw_spin_unlock(&lockdep_lock);
return 0;
}
/* prevent any recursions within lockdep from causing deadlocks */
current->lockdep_recursion++;
return 1;
}
static inline int graph_unlock(void)
{
if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
return DEBUG_LOCKS_WARN_ON(1);
current->lockdep_recursion--;
__raw_spin_unlock(&lockdep_lock);
return 0;
}
/*
* Turn lock debugging off and return with 0 if it was off already,
* and also release the graph lock:
*/
static inline int debug_locks_off_graph_unlock(void)
{
int ret = debug_locks_off();
__raw_spin_unlock(&lockdep_lock);
return ret;
}
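/*
 * Usage pattern for the helpers above (as seen in the callers below,
 * e.g. register_lock_class()): take the graph lock and bail out if
 * lockdep has already been turned off, do the update, then drop it:
 *
 *	raw_local_irq_save(flags);
 *	if (!graph_lock()) {
 *		raw_local_irq_restore(flags);
 *		return NULL;
 *	}
 *	... modify the dependency graph ...
 *	graph_unlock();
 *	raw_local_irq_restore(flags);
 */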
static int lockdep_initialized;
unsigned long nr_list_entries;
struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
/*
* All data structures here are protected by the global debug_lock.
*
* Mutex key structs only get allocated, once during bootup, and never
* get freed - this significantly simplifies the debugging code.
*/
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
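/*
 * held_lock::class_idx is stored 1-based: 0 means "no class set", so
 * hlock_class() below warns on 0 and otherwise indexes lock_classes[]
 * with class_idx - 1.
 */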
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
if (!hlock->class_idx) {
DEBUG_LOCKS_WARN_ON(1);
return NULL;
}
return lock_classes + hlock->class_idx - 1;
}
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
static int lock_point(unsigned long points[], unsigned long ip)
{
int i;
for (i = 0; i < LOCKSTAT_POINTS; i++) {
if (points[i] == 0) {
points[i] = ip;
break;
}
if (points[i] == ip)
break;
}
return i;
}
static void lock_time_inc(struct lock_time *lt, s64 time)
{
if (time > lt->max)
lt->max = time;
if (time < lt->min || !lt->min)
lt->min = time;
lt->total += time;
lt->nr++;
}
static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
dst->min += src->min;
dst->max += src->max;
dst->total += src->total;
dst->nr += src->nr;
}
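/*
 * lock_stats() folds the per-cpu lock_class_stats of one class into a
 * single struct lock_class_stats, summing the contention/contending
 * points, bounce counters and wait/hold times over all possible CPUs
 * (this is what the lock_stat proc interface is expected to consume).
 */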
struct lock_class_stats lock_stats(struct lock_class *class)
{
struct lock_class_stats stats;
int cpu, i;
memset(&stats, 0, sizeof(struct lock_class_stats));
for_each_possible_cpu(cpu) {
struct lock_class_stats *pcs =
&per_cpu(lock_stats, cpu)[class - lock_classes];
for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
stats.contention_point[i] += pcs->contention_point[i];
for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
stats.contending_point[i] += pcs->contending_point[i];
lock_time_add(&pcs->read_waittime, &stats.read_waittime);
lock_time_add(&pcs->write_waittime, &stats.write_waittime);
lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);
for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
stats.bounces[i] += pcs->bounces[i];
}
return stats;
}
void clear_lock_stats(struct lock_class *class)
{
int cpu;
for_each_possible_cpu(cpu) {
struct lock_class_stats *cpu_stats =
&per_cpu(lock_stats, cpu)[class - lock_classes];
memset(cpu_stats, 0, sizeof(struct lock_class_stats));
}
memset(class->contention_point, 0, sizeof(class->contention_point));
memset(class->contending_point, 0, sizeof(class->contending_point));
}
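/*
 * get_lock_stats()/put_lock_stats() wrap get_cpu_var()/put_cpu_var(),
 * so the returned pointer refers to this CPU's statistics slot and
 * preemption stays disabled until put_lock_stats() is called.
 */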
static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
return &get_cpu_var(lock_stats)[class - lock_classes];
}
static void put_lock_stats(struct lock_class_stats *stats)
{
put_cpu_var(lock_stats);
}
static void lock_release_holdtime(struct held_lock *hlock)
{
struct lock_class_stats *stats;
s64 holdtime;
if (!lock_stat)
return;
holdtime = sched_clock() - hlock->holdtime_stamp;
stats = get_lock_stats(hlock_class(hlock));
if (hlock->read)
lock_time_inc(&stats->read_holdtime, holdtime);
else
lock_time_inc(&stats->write_holdtime, holdtime);
put_lock_stats(stats);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif
/*
* We keep a global list of all lock classes. The list only grows,
* never shrinks. The list is only accessed with the lockdep
* spinlock lock held.
*/
LIST_HEAD(all_lock_classes);
/*
* The lockdep classes are in a hash-table as well, for fast lookup:
*/
#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key) (classhash_table + __classhashfn((key)))
static struct list_head classhash_table[CLASSHASH_SIZE];
/*
* We put the lock dependency chains into a hash-table as well, to cache
* their existence:
*/
#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
static struct list_head chainhash_table[CHAINHASH_SIZE];
/*
* The hash key of the lock dependency chains is a hash itself too:
* it's a hash of all locks taken up to that lock, including that lock.
* It's a 64-bit hash, because it's important for the keys to be
* unique.
*/
#define iterate_chain_key(key1, key2) \
(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
(key2))
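/*
 * Illustration (the acquire path itself is outside this excerpt): the
 * per-task chain key is built incrementally - each newly acquired lock
 * folds its class index into the previous value, so the same sequence
 * of lock classes always hashes to the same 64-bit chain key:
 *
 *	u64 chain_key = 0;	/* initial value (assumed) */
 *	chain_key = iterate_chain_key(chain_key, class_idx_A);
 *	chain_key = iterate_chain_key(chain_key, class_idx_B);
 */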
void lockdep_off(void)
{
current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);
void lockdep_on(void)
{
current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);
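/*
 * lockdep_off()/lockdep_on() just bump/drop the per-task recursion
 * counter; while it is non-zero the lockdep entry points are expected
 * to treat the task as "inside lockdep" and back off, so the calls
 * must be strictly balanced.
 */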
/*
* Debugging switches:
*/
#define VERBOSE 0
#define VERY_VERBOSE 0
#if VERBOSE
# define HARDIRQ_VERBOSE 1
# define SOFTIRQ_VERBOSE 1
# define RECLAIM_VERBOSE 1
#else
# define HARDIRQ_VERBOSE 0
# define SOFTIRQ_VERBOSE 0
# define RECLAIM_VERBOSE 0
#endif
#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
/*
* Quick filtering for interesting events:
*/
static int class_filter(struct lock_class *class)
{
/* Example: report only on this particular lock class */
if (class->name_version == 1 &&
!strcmp(class->name, "&struct->lockfield"))
return 1;
/* Filter everything else. 1 would be to allow everything else */
return 0;
}
#endif
static int verbose(struct lock_class *class)
{
#if VERBOSE
return class_filter(class);
#endif
return 0;
}
/*
* Stack-trace: tightly packed array of stack backtrace
* addresses. Protected by the graph_lock.
*/
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
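/*
 * save_trace() below appends the current backtrace into the shared
 * stack_trace[] array (skipping the innermost 3 frames) and hands out
 * the slice just written; nothing is ever freed, and once
 * MAX_STACK_TRACE_ENTRIES is reached the validator turns itself off.
 */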
static int save_trace(struct stack_trace *trace)
{
trace->nr_entries = 0;
trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
trace->entries = stack_trace + nr_stack_trace_entries;
trace->skip = 3;
save_stack_trace(trace);
trace->max_entries = trace->nr_entries;
nr_stack_trace_entries += trace->nr_entries;
if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
if (!debug_locks_off_graph_unlock())
return 0;
printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
printk("turning off the locking correctness validator.\n");
dump_stack();
return 0;
}
return 1;
}
unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;
unsigned int max_recursion_depth;
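/*
 * Visited-node marking for the dependency-graph walks: instead of
 * clearing a flag on every class between walks, a global generation
 * counter is bumped at the start of each walk (depth == 0) and a class
 * counts as "already visited" when its dep_gen_id matches the current
 * generation - see lockdep_dependency_visit() below.
 */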
static unsigned int lockdep_dependency_gen_id;
static bool lockdep_dependency_visit(struct lock_class *source,
unsigned int depth)
{
if (!depth)
lockdep_dependency_gen_id++;
if (source->dep_gen_id == lockdep_dependency_gen_id)
return true;
source->dep_gen_id = lockdep_dependency_gen_id;
return false;
}
#ifdef CONFIG_DEBUG_LOCKDEP
/*
* We cannot printk in early bootup code. Not even early_printk()
* might work. So we mark any initialization errors and printk
* about it later on, in lockdep_info().
*/
static int lockdep_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
.entries = lockdep_init_trace_data,
};
/*
* Various lockdep statistics:
*/
atomic_t chain_lookup_hits;
atomic_t chain_lookup_misses;
atomic_t hardirqs_on_events;
atomic_t hardirqs_off_events;
atomic_t redundant_hardirqs_on;
atomic_t redundant_hardirqs_off;
atomic_t softirqs_on_events;
atomic_t softirqs_off_events;
atomic_t redundant_softirqs_on;
atomic_t redundant_softirqs_off;
atomic_t nr_unused_locks;
atomic_t nr_cyclic_checks;
atomic_t nr_cyclic_check_recursions;
atomic_t nr_find_usage_forwards_checks;
atomic_t nr_find_usage_forwards_recursions;
atomic_t nr_find_usage_backwards_checks;
atomic_t nr_find_usage_backwards_recursions;
#endif
/*
* Locking printouts:
*/
#define __USAGE(__STATE) \
[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
[LOCK_USED] = "INITIAL USE",
};
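/*
 * Legend for the usage characters produced by get_usage_char() below,
 * derived directly from its logic:
 *   '.'  neither the USED_IN nor the ENABLED bit is set for this state
 *   '+'  only the ENABLED bit is set (the irq type was enabled while held)
 *   '-'  only the USED_IN bit is set (the lock was taken in that context)
 *   '?'  both bits are set
 */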
const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}
static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
return 1UL << bit;
}
static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
char c = '.';
if (class->usage_mask & lock_flag(bit + 2))
c = '+';
if (class->usage_mask & lock_flag(bit)) {
c = '-';
if (class->usage_mask & lock_flag(bit + 2))
c = '?';
}
return c;
}
void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
int i = 0;
#define LOCKDEP_STATE(__STATE) \
usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
usage[i++] = get_usage_char(class, LOCK_ENABLED_##__STATE); \
usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ); \
usage[i++] = get_usage_char(class, LOCK_ENABLED_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE
usage[i] = '\0';
}
static void print_lock_name(struct lock_class *class)
{
char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
const char *name;
get_usage_chars(class, usage);
name = class->name;
if (!name) {
name = __get_key_name(class->key, str);
printk(" (%s", name);
} else {
printk(" (%s", name);
if (class->name_version > 1)
printk("#%d", class->name_version);
if (class->subclass)
printk("/%d", class->subclass);
}
printk("){%s}", usage);
}
static void print_lockdep_cache(struct lockdep_map *lock)
{
const char *name;
char str[KSYM_NAME_LEN];
name = lock->name;
if (!name)
name = __get_key_name(lock->key->subkeys, str);
printk("%s", name);
}
static void print_lock(struct held_lock *hlock)
{
print_lock_name(hlock_class(hlock));
printk(", at: ");
print_ip_sym(hlock->acquire_ip);
}
static void lockdep_print_held_locks(struct task_struct *curr)
{
int i, depth = curr->lockdep_depth;
if (!depth) {
printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
return;
}
printk("%d lock%s held by %s/%d:\n",
depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
for (i = 0; i < depth; i++) {
printk(" #%d: ", i);
print_lock(curr->held_locks + i);
}
}
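/*
 * Example of what lockdep_print_held_locks() emits (lock names, usage
 * masks and addresses are purely illustrative):
 *
 *	2 locks held by foo/1234:
 *	 #0:  (some_mutex){+.+...}, at: [<ffffffff81234567>] some_caller+0x12/0x80
 *	 #1:  (other_lock){......}, at: [<ffffffff81234890>] other_caller+0x34/0x90
 */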
static void print_lock_class_header(struct lock_class *class, int depth)
{
int bit;
print_lock_name(class);
printk(" ops: %lu", class->ops);
printk(" {\n");
for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
if (class->usage_mask & (1 << bit)) {
int len = depth;
len += printk("%*s %s", depth, "", usage_str[bit]);
len += printk(" at:\n");
print_stack_trace(class->usage_traces + bit, len);
}
}
printk("%*s }\n", depth, "");
}
/*
* printk the shortest lock dependencies from @start to @end in reverse order:
*/
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
struct lock_list *root)
{
struct lock_list *entry = leaf;
int depth;
/*compute depth from generated tree by BFS*/
depth = get_lock_depth(leaf);
do {
print_lock_class_header(entry->class, depth);
printk("%*s ... acquired at:\n", depth, "");
print_stack_trace(&entry->trace, 2);
printk("\n");
if (depth == 0 && (entry != root)) {
printk("lockdep:%s bad BFS generated tree\n", __func__);
break;
}
entry = get_lock_parent(entry);
depth--;
} while (entry && (depth >= 0));
return;
}
/*
* printk all lock dependencies starting at <entry>:
*/
static void __used
print_lock_dependencies(struct lock_class *class, int depth)
{
struct lock_list *entry;
if (lockdep_dependency_visit(class, depth))
return;
if (DEBUG_LOCKS_WARN_ON(depth >= 20))
return;
print_lock_class_header(class, depth);
list_for_each_entry(entry, &class->locks_after, entry) {
if (DEBUG_LOCKS_WARN_ON(!entry->class))
return;
print_lock_dependencies(entry->class, depth + 1);
printk("%*s ... acquired at:\n",depth,"");
print_stack_trace(&entry->trace, 2);
printk("\n");
}
}
static void print_kernel_version(void)
{
printk("%s %.*s\n", init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
}
static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
return class_filter(class);
#endif
return 0;
}
/*
 * Is this the address of a static object:
 */
static int static_obj(void *obj)
{
unsigned long start = (unsigned long) &_stext,
end = (unsigned long) &_end,
addr = (unsigned long) obj;
#ifdef CONFIG_SMP
int i;
#endif
if ((addr >= start) && (addr < end))
return 1;
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
+ per_cpu_offset(i);
if ((addr >= start) && (addr < end))
return 1;
}
#endif
/*
* module var?
*/
return is_module_address(addr);
}
/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
struct lock_class *class;
int count = 0;
list_for_each_entry(class, &all_lock_classes, lock_entry) {
if (new_class->key - new_class->subclass == class->key)
return class->name_version;
if (class->name && !strcmp(class->name, new_class->name))
count = max(count, class->name_version);
}
return count + 1;
}
/*
* Register a lock's class in the hash-table, if the class is not present
* yet. Otherwise we look it up. We cache the result in the lock object
* itself, so actual lookup of the hash should be once per lock object.
*/
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
struct lockdep_subclass_key *key;
struct list_head *hash_head;
struct lock_class *class;
#ifdef CONFIG_DEBUG_LOCKDEP
/*
* If the architecture calls into lockdep before initializing
* the hashes then we'll warn about it later. (we cannot printk
* right now)
*/
if (unlikely(!lockdep_initialized)) {
lockdep_init();
lockdep_init_error = 1;
save_stack_trace(&lockdep_init_trace);
}
#endif
/*
* Static locks do not have their class-keys yet - for them the key
* is the lock object itself:
*/
if (unlikely(!lock->key))
lock->key = (void *)lock;
/*
* NOTE: the class-key must be unique. For dynamic locks, a static
* lock_class_key variable is passed in through the mutex_init()
* (or spin_lock_init()) call - which acts as the key. For static
* locks we use the lock object itself as the key.
*/
BUILD_BUG_ON(sizeof(struct lock_class_key) >
sizeof(struct lockdep_map));
key = lock->key->subkeys + subclass;
hash_head = classhashentry(key);
/*
 * We can walk the hash lockfree, because the hash only
 * grows, and we are careful when adding entries to the end:
 */
list_for_each_entry(class, hash_head, hash_entry) {
if (class->key == key) {
WARN_ON_ONCE(class->name != lock->name);
return class;
}
}
return NULL;
}
/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
struct lockdep_subclass_key *key;
struct list_head *hash_head;
struct lock_class *class;
unsigned long flags;
class = look_up_lock_class(lock, subclass);
if (likely(class))
return class;
/*
* Debug-check: all keys must be persistent!
*/
if (!static_obj(lock->key)) {
debug_locks_off();
printk("INFO: trying to register non-static key.\n");
printk("the code is fine but needs lockdep annotation.\n");
printk("turning off the locking correctness validator.\n");
dump_stack();
return NULL;
}
key = lock->key->subkeys + subclass;
hash_head = classhashentry(key);
raw_local_irq_save(flags);
if (!graph_lock()) {
raw_local_irq_restore(flags);
return NULL;
}
/*
* We have to do the hash-walk again, to avoid races
* with another CPU:
*/
list_for_each_entry(class, hash_head, hash_entry)
if (class->key == key)
goto out_unlock_set;
/*
* Allocate a new key from the static array, and add it to
* the hash:
*/
if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
if (!debug_locks_off_graph_unlock()) {
raw_local_irq_restore(flags);
return NULL;
}
raw_local_irq_restore(flags);
printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
printk("turning off the locking correctness validator.\n");
return NULL;
}
class = lock_classes + nr_lock_classes++;
debug_atomic_inc(&nr_unused_locks);
class->key = key;
class->name = lock->name;
class->subclass = subclass;
INIT_LIST_HEAD(&class->lock_entry);
INIT_LIST_HEAD(&class->locks_before);
INIT_LIST_HEAD(&class->locks_after);
class->name_version = count_matching_names(class);
/*
* We use RCU's safe list-add method to make
* parallel walking of the hash-list safe:
*/
list_add_tail_rcu(&class->hash_entry, hash_head);
/*
* Add it to the global list of classes:
*/
list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
if (verbose(class)) {
graph_unlock();
raw_local_irq_restore(flags);
printk("\nnew class %p: %s", class->key, class->name);
if (class->name_version > 1)
printk("#%d", class->name_version);
printk("\n");
dump_stack();
raw_local_irq_save(flags);
if (!graph_lock()) {
raw_local_irq_restore(flags);
return NULL;
}
}
out_unlock_set:
graph_unlock();
raw_local_irq_restore(flags);
if (!subclass || force)
lock->class_cache = class;
if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
return NULL;
return class;
}
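/*
 * Note on the fast path: register_lock_class() caches the subclass-0
 * class in lock->class_cache, so subsequent acquisitions of the same
 * lockdep_map can (elsewhere) skip the hash lookup entirely.  A NULL
 * return means lockdep gave up - either the key is not static or the
 * static tables (MAX_LOCKDEP_KEYS) ran out - and callers must cope.
 */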
#ifdef CONFIG_PROVE_LOCKING
/*
* Allocate a lockdep entry. (assumes the graph_lock held, returns
* with NULL on failure)
*/
static struct lock_list *alloc_list_entry(void)
{
if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
if (!debug_locks_off_graph_unlock())
return NULL;
printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
printk("turning off the locking correctness validator.\n");
return NULL;
}
return list_entries + nr_list_entries++;
}
/*
* Add a new dependency to the head of the list:
*/
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
struct list_head *head, unsigned long ip, int distance)
{
struct lock_list *entry;
/*
* Lock not present yet - get a new dependency struct and
* add it to the list:
*/
entry = alloc_list_entry();
if (!entry)
return 0;
if (!save_trace(&entry->trace))
return 0;
entry->class = this;
entry->distance = distance;
/*
* Since we never remove from the dependency list, the list can
* be walked lockless by other CPUs, it's only allocation
* that must be protected by the spinlock. But this also means
* we must make new entries visible only once writes to the
* entry become visible - hence the RCU op:
*/
list_add_tail_rcu(&entry->entry, head);
return 1;
}
unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
static struct circular_queue lock_cq;
unsigned int max_bfs_queue_depth;
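/*
 * Shared BFS state, all serialized by the graph lock: lock_cq is the
 * single work queue used by __bfs() below, bfs_accessed is a bitmap
 * with one bit per list_entries[] slot (presumably driven by the
 * lock_accessed()/mark_lock_accessed() helpers, which are not part of
 * this excerpt), and max_bfs_queue_depth records the deepest queue
 * occupancy seen, for statistics.
 */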
static int __bfs(struct lock_list *source_entry,
void *data,
int (*match)(struct lock_list *entry, void *data),
struct lock_list **target_entry,
int forward)
{
struct lock_list *entry;
struct list_head *head;
struct circular_queue *cq = &lock_cq;
int ret = 1;
if (match(source_entry, data)) {
*target_entry = source_entry;
ret = 0;
goto exit;
}
if (forward)
head = &source_entry->class->locks_after;
else
head = &source_entry->class->locks_before;
if (list_empty(head))
goto exit;
__cq_init(cq);
__cq_enqueue(cq, (unsigned long)source_entry);
while (!__cq_empty(cq)) {
struct lock_list *lock;
__cq_dequeue(cq, (unsigned long *)&lock);
if (!lock->class) {
ret = -2;
goto exit;
}
if (forward)
head = &lock->class->locks_after;
else
head = &lock->class->locks_before;
list_for_each_entry(entry, head, entry) {
if (!lock_accessed(entry)) {
unsigned int cq_depth;
mark_lock_accessed(entry, lock);
if (match(entry, data)) {
*target_entry = entry;
ret = 0;
goto exit;
}
if (__cq_enqueue(cq, (unsigned long)entry)) {
ret = -1;
goto exit;
}
cq_depth = __cq_get_elem_count(cq);
if (max_bfs_queue_depth < cq_depth)
max_bfs_queue_depth = cq_depth;
}
}
}
exit:
return ret;
}
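/*
 * __bfs() return values, as implemented above:
 *    1  no entry matched (the search ran to completion)
 *    0  a match was found and *target_entry points at it
 *   -1  the circular queue overflowed
 *   -2  a dequeued entry had no class (corrupted graph)
 * __bfs_forwards() below (and its backwards counterpart) are expected
 * to be thin wrappers that only fix the search direction.
 */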
static inline int __bfs_forwards(struct lock_list *src_entry,