if (DEBUG_LOCKS_WARN_ON(!depth))
return 0;
prev_hlock = NULL;
for (i = depth-1; i >= 0; i--) {
hlock = curr->held_locks + i;
/*
* We must not cross into another context:
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
return print_unlock_inbalance_bug(curr, lock, ip);
found_it:
if (hlock->instance == lock)
lock_release_holdtime(hlock);
if (hlock->references) {
hlock->references--;
if (hlock->references) {
/*
* We had, and after removing one, still have
* references, the current lock stack is still
* valid. We're done!
*/
return 1;
}
}
/*
* We have the right lock to unlock, 'hlock' points to it.
* Now we remove it from the stack, and add back the other
* entries (if any), recalculating the hash along the way:
*/
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
for (i++; i < depth; i++) {
hlock = curr->held_locks + i;
if (!__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass, hlock->trylock,
hlock->read, hlock->check, hlock->hardirqs_off,
hlock->nest_lock, hlock->acquire_ip,
hlock->references))
return 0;
}
if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
return 0;
return 1;
}
/*
* Remove the lock from the list of currently held locks - this gets
* called on mutex_unlock()/spin_unlock*() (or on a failed
* mutex_lock_interruptible()). This is done for unlocks that nest
* perfectly. (i.e. the current top of the lock-stack is unlocked)
*/
static int lock_release_nested(struct task_struct *curr,
struct lockdep_map *lock, unsigned long ip)
{
struct held_lock *hlock;
unsigned int depth;
/*
* Pop off the top of the lock stack:
*/
depth = curr->lockdep_depth - 1;
hlock = curr->held_locks + depth;
/*
* Is the unlock non-nested:
*/
if (hlock->instance != lock || hlock->references)
return lock_release_non_nested(curr, lock, ip);
curr->lockdep_depth--;
if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
return 0;
curr->curr_chain_key = hlock->prev_chain_key;
#ifdef CONFIG_DEBUG_LOCKDEP
hlock->prev_chain_key = 0;
hlock->class_idx = 0;
hlock->acquire_ip = 0;
hlock->irq_context = 0;
#endif
return 1;
}
/*
* Remove the lock from the list of currently held locks - this gets
* called on mutex_unlock()/spin_unlock*() (or on a failed
* mutex_lock_interruptible()). Both perfectly nested unlocks (the
* current top of the lock-stack is unlocked) and out-of-order unlocks
* are handled here.
*/
static void
__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
struct task_struct *curr = current;
if (!check_unlock(curr, lock, ip))
return;
if (nested) {
if (!lock_release_nested(curr, lock, ip))
return;
} else {
if (!lock_release_non_nested(curr, lock, ip))
return;
}
check_chain_key(curr);
}
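/*
* Check whether 'lock' appears anywhere in the current task's stack of
* held locks (matching is done by match_held_lock()).
*/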
static int __lock_is_held(struct lockdep_map *lock)
{
struct task_struct *curr = current;
int i;
for (i = 0; i < curr->lockdep_depth; i++) {
struct held_lock *hlock = curr->held_locks + i;
if (match_held_lock(hlock, lock))
return 1;
}
return 0;
}
/*
* Check whether we follow the irq-flags state precisely:
*/
static void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
defined(CONFIG_TRACE_IRQFLAGS)
if (irqs_disabled_flags(flags)) {
if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
printk("possible reason: unannotated irqs-off.\n");
}
} else {
if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
printk("possible reason: unannotated irqs-on.\n");
}
}
/*
* We don't accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
* check if not in hardirq contexts:
*/
if (!hardirq_count()) {
if (softirq_count())
DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
else
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
if (!debug_locks)
print_irqtrace_events(current);
#endif
}
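/*
* Re-annotate an already-held lock with a new name/key/subclass and
* re-validate the chain key of the held-lock stack afterwards.
*/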
void lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip)
{
unsigned long flags;
if (unlikely(current->lockdep_recursion))
return;
raw_local_irq_save(flags);
current->lockdep_recursion = 1;
check_flags(flags);
if (__lock_set_class(lock, name, key, subclass, ip))
check_chain_key(current);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_set_class);
/*
* We are not always called with irqs disabled - do that here,
* and also avoid lockdep recursion:
*/
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lockdep_map *nest_lock, unsigned long ip)
{
unsigned long flags;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
if (unlikely(current->lockdep_recursion))
return;
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
__lock_acquire(lock, subclass, trylock, read, check,
irqs_disabled_flags(flags), nest_lock, ip, 0);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);
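/*
* Counterpart of lock_acquire(): disable irqs and guard against lockdep
* recursion, then do the real work in __lock_release().
*/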
void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip)
{
unsigned long flags;
trace_lock_release(lock, nested, ip);
if (unlikely(current->lockdep_recursion))
return;
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
__lock_release(lock, nested, ip);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_release);
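/*
* Recursion-safe wrapper around __lock_is_held(): returns non-zero if the
* current task holds 'lock'.
*/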
int lock_is_held(struct lockdep_map *lock)
{
unsigned long flags;
int ret = 0;
if (unlikely(current->lockdep_recursion))
return ret;
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
ret = __lock_is_held(lock);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
return ret;
}
EXPORT_SYMBOL_GPL(lock_is_held);
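/*
* Record that the current task is (or is no longer) inside memory reclaim,
* together with the gfp mask of the reclaim operation.
*/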
void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
{
current->lockdep_reclaim_gfp = gfp_mask;
}
void lockdep_clear_current_reclaim_state(void)
{
current->lockdep_reclaim_gfp = 0;
}
#ifdef CONFIG_LOCK_STAT
static int
print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
unsigned long ip)
{
if (!debug_locks_off())
return 0;
if (debug_locks_silent)
return 0;
printk("\n=================================\n");
printk( "[ BUG: bad contention detected! ]\n");
printk( "---------------------------------\n");
printk("%s/%d is trying to contend lock (",
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
printk(") at:\n");
print_ip_sym(ip);
printk("but there are no locks held!\n");
printk("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
return 0;
}
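/*
* A held lock is being contended for: locate it on the held-lock stack,
* timestamp the start of the wait and bump the per-class contention
* statistics.
*/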
static void
__lock_contended(struct lockdep_map *lock, unsigned long ip)
{
struct task_struct *curr = current;
struct held_lock *hlock, *prev_hlock;
struct lock_class_stats *stats;
unsigned int depth;
int i, contention_point, contending_point;
depth = curr->lockdep_depth;
if (DEBUG_LOCKS_WARN_ON(!depth))
return;
prev_hlock = NULL;
for (i = depth-1; i >= 0; i--) {
hlock = curr->held_locks + i;
/*
* We must not cross into another context:
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
print_lock_contention_bug(curr, lock, ip);
return;
found_it:
if (hlock->instance != lock)
return;
hlock->waittime_stamp = lockstat_clock();
contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
contending_point = lock_point(hlock_class(hlock)->contending_point,
lock->ip);
stats = get_lock_stats(hlock_class(hlock));
if (contention_point < LOCKSTAT_POINTS)
stats->contention_point[contention_point]++;
if (contending_point < LOCKSTAT_POINTS)
stats->contending_point[contending_point]++;
if (lock->cpu != smp_processor_id())
stats->bounces[bounce_contended + !!hlock->read]++;
put_lock_stats(stats);
}
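/*
* The contended lock has finally been acquired: compute how long we waited
* for it and fold that into the per-class wait-time statistics.
*/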
static void
__lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
struct task_struct *curr = current;
struct held_lock *hlock, *prev_hlock;
struct lock_class_stats *stats;
unsigned int depth;
u64 now, waittime = 0;
int i, cpu;
depth = curr->lockdep_depth;
if (DEBUG_LOCKS_WARN_ON(!depth))
return;
prev_hlock = NULL;
for (i = depth-1; i >= 0; i--) {
hlock = curr->held_locks + i;
/*
* We must not cross into another context:
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
print_lock_contention_bug(curr, lock, _RET_IP_);
return;
found_it:
if (hlock->instance != lock)
return;
cpu = smp_processor_id();
if (hlock->waittime_stamp) {
now = lockstat_clock();
waittime = now - hlock->waittime_stamp;
hlock->holdtime_stamp = now;
}
trace_lock_acquired(lock, ip, waittime);
stats = get_lock_stats(hlock_class(hlock));
if (waittime) {
if (hlock->read)
lock_time_inc(&stats->read_waittime, waittime);
else
lock_time_inc(&stats->write_waittime, waittime);
}
if (lock->cpu != cpu)
stats->bounces[bounce_acquired + !!hlock->read]++;
put_lock_stats(stats);
lock->cpu = cpu;
lock->ip = ip;
}
void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
trace_lock_contended(lock, ip);
if (unlikely(!lock_stat))
return;
if (unlikely(current->lockdep_recursion))
return;
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
__lock_contended(lock, ip);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_contended);
void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
if (unlikely(!lock_stat))
return;
if (unlikely(current->lockdep_recursion))
return;
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
__lock_acquired(lock, ip);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquired);
#endif
/*
* Used by the testsuite, sanitize the validator state
* after a simulated failure:
*/
void lockdep_reset(void)
{
unsigned long flags;
int i;
raw_local_irq_save(flags);
current->curr_chain_key = 0;
current->lockdep_depth = 0;
current->lockdep_recursion = 0;
memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
nr_hardirq_chains = 0;
nr_softirq_chains = 0;
nr_process_chains = 0;
debug_locks = 1;
for (i = 0; i < CHAINHASH_SIZE; i++)
INIT_LIST_HEAD(chainhash_table + i);
raw_local_irq_restore(flags);
}
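/*
* Remove a lock class from every lockdep data structure it is linked into.
*/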
static void zap_class(struct lock_class *class)
{
int i;
/*
* Remove all dependencies this lock is
* involved in:
*/
for (i = 0; i < nr_list_entries; i++) {
if (list_entries[i].class == class)
list_del_rcu(&list_entries[i].entry);
}
/*
* Unhash the class and remove it from the all_lock_classes list:
*/
list_del_rcu(&class->hash_entry);
list_del_rcu(&class->lock_entry);
class->key = NULL;
}
static inline int within(const void *addr, void *start, unsigned long size)
{
return addr >= start && addr < start + size;
}
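/*
* A memory range that may contain lock names or lock_class_key's (e.g.
* module text/data) is going away: zap every class whose key or name
* lies inside [start, start + size).
*/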
void lockdep_free_key_range(void *start, unsigned long size)
{
struct lock_class *class, *next;
struct list_head *head;
unsigned long flags;
int i;
int locked;
raw_local_irq_save(flags);
locked = graph_lock();
/*
* Unhash all classes that were created by this module:
*/
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
if (list_empty(head))
continue;
list_for_each_entry_safe(class, next, head, hash_entry) {
if (within(class->key, start, size))
zap_class(class);
else if (within(class->name, start, size))
zap_class(class);
}
}
if (locked)
graph_unlock();
raw_local_irq_restore(flags);
}
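/*
* A lockdep_map is being destroyed or re-initialized: zap every class
* that was ever registered for it (one per subclass), then verify that
* none of them is still reachable.
*/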
void lockdep_reset_lock(struct lockdep_map *lock)
{
struct lock_class *class, *next;
struct list_head *head;
unsigned long flags;
int i, j;
int locked;
raw_local_irq_save(flags);
/*
* Remove all classes this lock might have:
*/
for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
/*
* If the class exists we look it up and zap it:
*/
class = look_up_lock_class(lock, j);
if (class)
zap_class(class);
}
/*
* Debug check: in the end all mapped classes should
* be gone.
*/
locked = graph_lock();
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
if (list_empty(head))
continue;
list_for_each_entry_safe(class, next, head, hash_entry) {
if (unlikely(class == lock->class_cache)) {
if (debug_locks_off_graph_unlock())
WARN_ON(1);
goto out_restore;
}
}
}
if (locked)
graph_unlock();
out_restore:
raw_local_irq_restore(flags);
}
void lockdep_init(void)
{
int i;
/*
* Some architectures have their own start_kernel()
* code which calls lockdep_init(), while we also
* call lockdep_init() from the start_kernel() itself,
* and we want to initialize the hashes only once:
*/
if (lockdep_initialized)
return;
for (i = 0; i < CLASSHASH_SIZE; i++)
INIT_LIST_HEAD(classhash_table + i);
for (i = 0; i < CHAINHASH_SIZE; i++)
INIT_LIST_HEAD(chainhash_table + i);
lockdep_initialized = 1;
}
void __init lockdep_info(void)
{
printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES);
printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH);
printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS);
printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE);
printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES);
printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS);
printk("... CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE);
printk(" memory used by lock dependency info: %lu kB\n",
(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
sizeof(struct list_head) * CLASSHASH_SIZE +
sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
sizeof(struct list_head) * CHAINHASH_SIZE
#ifdef CONFIG_PROVE_LOCKING
+ sizeof(struct circular_queue)
#endif
) / 1024
);
printk(" per task-struct memory footprint: %lu bytes\n",
sizeof(struct held_lock) * MAX_LOCK_DEPTH);
#ifdef CONFIG_DEBUG_LOCKDEP
if (lockdep_init_error) {
printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
printk("Call stack leading to lockdep invocation was:\n");
print_stack_trace(&lockdep_init_trace, 0);
}
#endif
}
static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
const void *mem_to, struct held_lock *hlock)
{
if (!debug_locks_off())
return;
if (debug_locks_silent)
return;
printk("\n=========================\n");
printk( "[ BUG: held lock freed! ]\n");
printk( "-------------------------\n");
printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
}
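/*
* Return non-zero if the lock object [lock_from, lock_from + lock_len)
* does not overlap the memory range [mem_from, mem_from + mem_len).
*/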
static inline int not_in_range(const void* mem_from, unsigned long mem_len,
const void* lock_from, unsigned long lock_len)
{
return lock_from + lock_len <= mem_from ||
mem_from + mem_len <= lock_from;
}
/*
* Called when kernel memory is freed (or unmapped), or if a lock
* is destroyed or reinitialized - this code checks whether there is
* any held lock in the memory range of <from> to <to>:
*/
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
struct task_struct *curr = current;
struct held_lock *hlock;
unsigned long flags;
int i;
if (unlikely(!debug_locks))
return;
local_irq_save(flags);
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
if (not_in_range(mem_from, mem_len, hlock->instance,
sizeof(*hlock->instance)))
continue;
print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
break;
}
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
static void print_held_locks_bug(struct task_struct *curr)
{
if (!debug_locks_off())
return;
if (debug_locks_silent)
return;
printk("\n=====================================\n");
printk( "[ BUG: lock held at task exit time! ]\n");
printk( "-------------------------------------\n");
printk("%s/%d is exiting with locks still held!\n",
curr->comm, task_pid_nr(curr));
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
}
void debug_check_no_locks_held(struct task_struct *task)
{
if (unlikely(task->lockdep_depth > 0))
print_held_locks_bug(task);
}
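/*
* Print the locks held by every task in the system (only reliable for
* tasks that are currently sleeping, see below).
*/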
void debug_show_all_locks(void)
{
struct task_struct *g, *p;
int count = 10;
int unlock = 1;
if (unlikely(!debug_locks)) {
printk("INFO: lockdep is turned off.\n");
return;
}
printk("\nShowing all locks held in the system:\n");
/*
* Here we try to get the tasklist_lock as hard as possible,
* if not successful after 2 seconds we ignore it (but keep
* trying). This is to enable a debug printout even if a
* tasklist_lock-holding task deadlocks or crashes.
*/
retry:
if (!read_trylock(&tasklist_lock)) {
if (count == 10)
printk("hm, tasklist_lock locked, retrying... ");
if (count) {
count--;
printk(" #%d", 10-count);
mdelay(200);
goto retry;
}
printk(" ignoring it.\n");
unlock = 0;
} else {
if (count != 10)
printk(KERN_CONT " locked it.\n");
}
do_each_thread(g, p) {
/*
* It's not reliable to print a task's held locks
* if it's not sleeping (or if it's not the current
* task):
*/
if (p->state == TASK_RUNNING && p != current)
continue;
if (p->lockdep_depth)
lockdep_print_held_locks(p);
if (!unlock)
if (read_trylock(&tasklist_lock))
unlock = 1;
} while_each_thread(g, p);
printk("\n");
printk("=============================================\n\n");
if (unlock)
read_unlock(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(debug_show_all_locks);
/*
* Careful: only use this function if you are sure that
* the task cannot run in parallel!
*/
void __debug_show_held_locks(struct task_struct *task)
{
if (unlikely(!debug_locks)) {
printk("INFO: lockdep is turned off.\n");
return;
}
lockdep_print_held_locks(task);
}
EXPORT_SYMBOL_GPL(__debug_show_held_locks);
void debug_show_held_locks(struct task_struct *task)
{
__debug_show_held_locks(task);
}
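/*
* Called on return to user space: a task leaving the kernel with locks
* still held is almost certainly a bug, so warn about it.
*/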
void lockdep_sys_exit(void)
{
struct task_struct *curr = current;
if (unlikely(curr->lockdep_depth)) {
if (!debug_locks_off())
return;
printk("\n================================================\n");
printk( "[ BUG: lock held when returning to user space! ]\n");
printk( "------------------------------------------------\n");
printk("%s/%d is leaving the kernel with locks still held!\n",
curr->comm, curr->pid);
lockdep_print_held_locks(curr);
}
}