static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	int ret = 1;

	switch (new_bit) {
	case LOCK_USED_IN_HARDIRQ:
		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQ))
			return 0;
		if (!valid_state(curr, this, new_bit,
				 LOCK_ENABLED_HARDIRQ_READ))
			return 0;
		/*
		 * just marked it hardirq-safe, check that this lock
		 * took no hardirq-unsafe lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
					  LOCK_ENABLED_HARDIRQ, "hard"))
			return 0;
#if STRICT_READ_CHECKS
		/*
		 * just marked it hardirq-safe, check that this lock
		 * took no hardirq-unsafe-read lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
				LOCK_ENABLED_HARDIRQ_READ, "hard-read"))
			return 0;
#endif
		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_USED_IN_SOFTIRQ:
		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQ))
			return 0;
		if (!valid_state(curr, this, new_bit,
				 LOCK_ENABLED_SOFTIRQ_READ))
			return 0;
		/*
		 * just marked it softirq-safe, check that this lock
		 * took no softirq-unsafe lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
					  LOCK_ENABLED_SOFTIRQ, "soft"))
			return 0;
#if STRICT_READ_CHECKS
		/*
		 * just marked it softirq-safe, check that this lock
		 * took no softirq-unsafe-read lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
				LOCK_ENABLED_SOFTIRQ_READ, "soft-read"))
			return 0;
#endif
		if (softirq_verbose(hlock_class(this)))
			ret = 2;
		break;
case LOCK_USED_IN_RECLAIM_FS:
if (!valid_state(curr, this, new_bit, LOCK_ENABLED_RECLAIM_FS))
return 0;
		if (!valid_state(curr, this, new_bit,
				 LOCK_ENABLED_RECLAIM_FS_READ))
			return 0;
/*
* just marked it reclaim-fs-safe, check that this lock
* took no reclaim-fs-unsafe lock in the past:
*/
if (!check_usage_forwards(curr, this,
LOCK_ENABLED_RECLAIM_FS, "reclaim-fs"))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it reclaim-fs-safe, check that this lock
* took no reclaim-fs-unsafe-read lock in the past:
*/
if (!check_usage_forwards(curr, this,
LOCK_ENABLED_RECLAIM_FS_READ, "reclaim-fs-read"))
return 0;
#endif
if (reclaim_verbose(hlock_class(this)))
ret = 2;
break;
	case LOCK_USED_IN_HARDIRQ_READ:
		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQ))
			return 0;
		/*
		 * just marked it hardirq-read-safe, check that this lock
		 * took no hardirq-unsafe lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
					  LOCK_ENABLED_HARDIRQ, "hard"))
			return 0;
		if (hardirq_verbose(hlock_class(this)))
ret = 2;
break;
	case LOCK_USED_IN_SOFTIRQ_READ:
		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQ))
			return 0;
		/*
		 * just marked it softirq-read-safe, check that this lock
		 * took no softirq-unsafe lock in the past:
		 */
		if (!check_usage_forwards(curr, this,
					  LOCK_ENABLED_SOFTIRQ, "soft"))
			return 0;
		if (softirq_verbose(hlock_class(this)))
			ret = 2;
		break;
case LOCK_USED_IN_RECLAIM_FS_READ:
if (!valid_state(curr, this, new_bit, LOCK_ENABLED_RECLAIM_FS))
return 0;
/*
* just marked it reclaim-fs-read-safe, check that this lock
* took no reclaim-fs-unsafe lock in the past:
*/
if (!check_usage_forwards(curr, this,
LOCK_ENABLED_RECLAIM_FS, "reclaim-fs"))
return 0;
if (reclaim_verbose(hlock_class(this)))
ret = 2;
break;
	case LOCK_ENABLED_HARDIRQ:
		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
return 0;
if (!valid_state(curr, this, new_bit,
LOCK_USED_IN_HARDIRQ_READ))
return 0;
/*
* just marked it hardirq-unsafe, check that no hardirq-safe
* lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_HARDIRQ, "hard"))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it hardirq-unsafe, check that no
* hardirq-safe-read lock in the system ever took
* it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
return 0;
#endif
		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_ENABLED_SOFTIRQ:
if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
return 0;
if (!valid_state(curr, this, new_bit,
LOCK_USED_IN_SOFTIRQ_READ))
return 0;
/*
* just marked it softirq-unsafe, check that no softirq-safe
* lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_SOFTIRQ, "soft"))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it softirq-unsafe, check that no
* softirq-safe-read lock in the system ever took
* it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
return 0;
#endif
		if (softirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_ENABLED_RECLAIM_FS:
if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS))
return 0;
if (!valid_state(curr, this, new_bit,
LOCK_USED_IN_RECLAIM_FS_READ))
return 0;
/*
* just marked it reclaim-fs-unsafe, check that no reclaim-fs-safe
* lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_RECLAIM_FS, "reclaim-fs"))
return 0;
#if STRICT_READ_CHECKS
/*
	 * just marked it reclaim-fs-unsafe, check that no
	 * reclaim-fs-safe-read lock in the system ever took
* it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_RECLAIM_FS_READ, "reclaim-fs-read"))
return 0;
#endif
if (reclaim_verbose(hlock_class(this)))
ret = 2;
break;
	case LOCK_ENABLED_HARDIRQ_READ:
		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it hardirq-read-unsafe, check that no
* hardirq-safe lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_HARDIRQ, "hard"))
return 0;
#endif
		if (hardirq_verbose(hlock_class(this)))
			ret = 2;
		break;
	case LOCK_ENABLED_SOFTIRQ_READ:
		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it softirq-read-unsafe, check that no
* softirq-safe lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_SOFTIRQ, "soft"))
return 0;
#endif
		if (softirq_verbose(hlock_class(this)))
			ret = 2;
		break;
case LOCK_ENABLED_RECLAIM_FS_READ:
if (!valid_state(curr, this, new_bit, LOCK_USED_IN_RECLAIM_FS))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it reclaim-fs-read-unsafe, check that no
* reclaim-fs-safe lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_RECLAIM_FS, "reclaim-fs"))
return 0;
#endif
if (reclaim_verbose(hlock_class(this)))
ret = 2;
		break;
	}

	return ret;
}
enum mark_type {
HARDIRQ,
SOFTIRQ,
RECLAIM_FS,
};
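/*
 * Note: each mark_type selects the matching LOCK_ENABLED_* usage bit in
 * mark_held_locks() below; read-held locks get the _READ variant.
 */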
/*
* Mark all held locks with a usage bit:
*/
static int
mark_held_locks(struct task_struct *curr, enum mark_type mark)
{
enum lock_usage_bit usage_bit;
struct held_lock *hlock;
int i;
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
		switch (mark) {
		case HARDIRQ:
			if (hlock->read)
				usage_bit = LOCK_ENABLED_HARDIRQ_READ;
			else
				usage_bit = LOCK_ENABLED_HARDIRQ;
			break;
		case SOFTIRQ:
			if (hlock->read)
				usage_bit = LOCK_ENABLED_SOFTIRQ_READ;
			else
				usage_bit = LOCK_ENABLED_SOFTIRQ;
			break;
		case RECLAIM_FS:
			if (hlock->read)
				usage_bit = LOCK_ENABLED_RECLAIM_FS_READ;
			else
				usage_bit = LOCK_ENABLED_RECLAIM_FS;
			break;
		default:
			BUG();
		}

		if (!mark_lock(curr, hlock, usage_bit))
return 0;
}
return 1;
}
/*
* Debugging helper: via this flag we know that we are in
* 'early bootup code', and will warn about any invalid irqs-on event:
*/
static int early_boot_irqs_enabled;
void early_boot_irqs_off(void)
{
early_boot_irqs_enabled = 0;
}
void early_boot_irqs_on(void)
{
early_boot_irqs_enabled = 1;
}
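/*
 * Note: trace_hardirqs_on_caller() below uses this flag to warn about
 * irqs-on events that happen before early boot has declared interrupts
 * usable.
 */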
/*
* Hardirqs will be enabled:
*/
void trace_hardirqs_on_caller(unsigned long ip)
{
	struct task_struct *curr = current;

	time_hardirqs_on(CALLER_ADDR0, ip);
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
return;
if (unlikely(curr->hardirqs_enabled)) {
debug_atomic_inc(&redundant_hardirqs_on);
return;
}
/* we'll do an OFF -> ON transition: */
curr->hardirqs_enabled = 1;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
return;
/*
* We are going to turn hardirqs on, so set the
* usage bit for all held locks:
*/
if (!mark_held_locks(curr, HARDIRQ))
return;
/*
* If we have softirqs enabled, then set the usage
* bit for all held locks. (disabled hardirqs prevented
* this bit from being set before)
*/
if (curr->softirqs_enabled)
		if (!mark_held_locks(curr, SOFTIRQ))
			return;
curr->hardirq_enable_ip = ip;
curr->hardirq_enable_event = ++curr->irq_events;
debug_atomic_inc(&hardirqs_on_events);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
void trace_hardirqs_on(void)
{
trace_hardirqs_on_caller(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
/*
* Hardirqs were disabled:
*/
void trace_hardirqs_off_caller(unsigned long ip)
{
struct task_struct *curr = current;
time_hardirqs_off(CALLER_ADDR0, ip);
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (curr->hardirqs_enabled) {
/*
* We have done an ON -> OFF transition:
*/
curr->hardirqs_enabled = 0;
curr->hardirq_disable_event = ++curr->irq_events;
debug_atomic_inc(&hardirqs_off_events);
} else
debug_atomic_inc(&redundant_hardirqs_off);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
void trace_hardirqs_off(void)
{
trace_hardirqs_off_caller(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off);
/*
* Softirqs will be enabled:
*/
void trace_softirqs_on(unsigned long ip)
{
struct task_struct *curr = current;
if (unlikely(!debug_locks))
return;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (curr->softirqs_enabled) {
debug_atomic_inc(&redundant_softirqs_on);
return;
}
/*
* We'll do an OFF -> ON transition:
*/
curr->softirqs_enabled = 1;
curr->softirq_enable_ip = ip;
curr->softirq_enable_event = ++curr->irq_events;
debug_atomic_inc(&softirqs_on_events);
/*
* We are going to turn softirqs on, so set the
* usage bit for all held locks, if hardirqs are
* enabled too:
*/
if (curr->hardirqs_enabled)
mark_held_locks(curr, SOFTIRQ);
}
/*
* Softirqs were disabled:
*/
void trace_softirqs_off(unsigned long ip)
{
struct task_struct *curr = current;
if (unlikely(!debug_locks))
return;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (curr->softirqs_enabled) {
/*
* We have done an ON -> OFF transition:
*/
curr->softirqs_enabled = 0;
curr->softirq_disable_ip = ip;
curr->softirq_disable_event = ++curr->irq_events;
debug_atomic_inc(&softirqs_off_events);
DEBUG_LOCKS_WARN_ON(!softirq_count());
} else
debug_atomic_inc(&redundant_softirqs_off);
}
void lockdep_trace_alloc(gfp_t gfp_mask)
{
struct task_struct *curr = current;
if (unlikely(!debug_locks))
return;
/* no reclaim without waiting on it */
if (!(gfp_mask & __GFP_WAIT))
return;
/* this guy won't enter reclaim */
if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
return;
	/* We're only interested in __GFP_FS allocations for now */
if (!(gfp_mask & __GFP_FS))
return;
if (DEBUG_LOCKS_WARN_ON(irqs_disabled()))
return;
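	/* mark all currently held locks as held across a GFP_FS allocation */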
mark_held_locks(curr, RECLAIM_FS);
}
static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
{
/*
* If non-trylock use in a hardirq or softirq context, then
* mark the lock as used in these contexts:
*/
if (!hlock->trylock) {
if (hlock->read) {
if (curr->hardirq_context)
if (!mark_lock(curr, hlock,
LOCK_USED_IN_HARDIRQ_READ))
return 0;
if (curr->softirq_context)
if (!mark_lock(curr, hlock,
LOCK_USED_IN_SOFTIRQ_READ))
return 0;
} else {
if (curr->hardirq_context)
if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
return 0;
if (curr->softirq_context)
if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
return 0;
}
}
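	/*
	 * If hardirqs were enabled at acquire time, the lock is
	 * hardirq-unsafe (and softirq-unsafe too if softirqs were
	 * enabled), so set the corresponding LOCK_ENABLED_* bits:
	 */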
if (!hlock->hardirqs_off) {
if (hlock->read) {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ_READ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ_READ))
					return 0;
		} else {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ))
					return 0;
		}
	}
/*
* We reuse the irq context infrastructure more broadly as a general
* context checking code. This tests GFP_FS recursion (a lock taken
* during reclaim for a GFP_FS allocation is held over a GFP_FS
* allocation).
*/
if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
if (hlock->read) {
if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
return 0;
} else {
if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
return 0;
}
}
return 1;
}
static int separate_irq_context(struct task_struct *curr,
struct held_lock *hlock)
{
unsigned int depth = curr->lockdep_depth;
/*
* Keep track of points where we cross into an interrupt context:
*/
hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
curr->softirq_context;
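	/*
	 * Note: this encodes the context as a small integer: 2 when in
	 * hardirq context, plus the softirq-context nesting count;
	 * 0 means plain process context.
	 */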
if (depth) {
struct held_lock *prev_hlock;
prev_hlock = curr->held_locks + depth-1;
/*
* If we cross into another context, reset the
* hash key (this also prevents the checking and the
* adding of the dependency to 'prev'):
*/
if (prev_hlock->irq_context != hlock->irq_context)
return 1;
}
	return 0;
}

#else
static inline
int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	WARN_ON(1);
	return 1;
}
static inline int mark_irqflags(struct task_struct *curr,
struct held_lock *hlock)
{
return 1;
}
static inline int separate_irq_context(struct task_struct *curr,
struct held_lock *hlock)
{
	return 0;
}

#endif

/*
 * Mark a lock with a usage bit, and validate the state transition:
 */
static int mark_lock(struct task_struct *curr, struct held_lock *this,
			     enum lock_usage_bit new_bit)
{
	unsigned int new_mask = 1 << new_bit, ret = 1;
	/*
	 * If already set then do not dirty the cacheline,
	 * nor do any checks:
	 */
	if (likely(hlock_class(this)->usage_mask & new_mask))
		return 1;
	if (!graph_lock())
		return 0;
	/* make sure we didn't race while waiting for the graph lock: */
	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
		graph_unlock();
		return 1;
	}
	hlock_class(this)->usage_mask |= new_mask;
	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
		return 0;
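	/*
	 * All hardirq/softirq/reclaim-fs usage bits are validated by
	 * mark_lock_irq(); a return value of 2 requests a verbose report,
	 * which is printed after graph_unlock() below.
	 */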
switch (new_bit) {
case LOCK_USED_IN_HARDIRQ:
case LOCK_USED_IN_SOFTIRQ:
case LOCK_USED_IN_HARDIRQ_READ:
case LOCK_USED_IN_SOFTIRQ_READ:
case LOCK_ENABLED_HARDIRQ:
case LOCK_ENABLED_SOFTIRQ:
case LOCK_ENABLED_HARDIRQ_READ:
case LOCK_ENABLED_SOFTIRQ_READ:
case LOCK_USED_IN_RECLAIM_FS:
case LOCK_USED_IN_RECLAIM_FS_READ:
case LOCK_ENABLED_RECLAIM_FS:
case LOCK_ENABLED_RECLAIM_FS_READ:
ret = mark_lock_irq(curr, this, new_bit);
if (!ret)
return 0;
break;
case LOCK_USED:
debug_atomic_dec(&nr_unused_locks);
break;
default:
if (!debug_locks_off_graph_unlock())
return 0;
WARN_ON(1);
return 0;
}
graph_unlock();
/*
* We must printk outside of the graph_lock:
*/
if (ret == 2) {
printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
print_lock(this);
print_irqtrace_events(curr);
dump_stack();
}
return ret;
}
/*
* Initialize a lock instance's lock-class mapping info:
*/
void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass)
{
if (unlikely(!debug_locks))
return;
if (DEBUG_LOCKS_WARN_ON(!key))
return;
if (DEBUG_LOCKS_WARN_ON(!name))
return;
/*
* Sanity check, the lock-class key must be persistent:
*/
if (!static_obj(key)) {
printk("BUG: key %p not in .data!\n", key);
DEBUG_LOCKS_WARN_ON(1);
return;
}
lock->name = name;
lock->key = key;
lock->class_cache = NULL;
#ifdef CONFIG_LOCK_STAT
lock->cpu = raw_smp_processor_id();
#endif
if (subclass)
register_lock_class(lock, subclass, 1);
}
EXPORT_SYMBOL_GPL(lockdep_init_map);
/*
* This gets called for every mutex_lock*()/spin_lock*() operation.
* We maintain the dependency maps and validate the locking attempt:
*/
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check, int hardirqs_off,
struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *curr = current;
	struct lock_class *class = NULL;
struct held_lock *hlock;
unsigned int depth, id;
int chain_head = 0;
u64 chain_key;
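	/*
	 * Without lock proving, downgrade full validation (check == 2)
	 * to the simpler checks:
	 */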
if (!prove_locking)
check = 1;
if (unlikely(!debug_locks))
return 0;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
debug_locks_off();
printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
printk("turning off the locking correctness validator.\n");
return 0;
}
if (!subclass)
class = lock->class_cache;
/*
* Not cached yet or subclass?
*/
	if (unlikely(!class)) {
		class = register_lock_class(lock, subclass, 0);
if (!class)
return 0;
}
debug_atomic_inc((atomic_t *)&class->ops);
if (very_verbose(class)) {
printk("\nacquire class [%p] %s", class->key, class->name);
if (class->name_version > 1)
printk("#%d", class->name_version);
printk("\n");
dump_stack();
}
/*
* Add the lock to the list of currently held locks.
	 * (we don't increase the depth just yet, up until the
* dependency checks are done)
*/
depth = curr->lockdep_depth;
if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
return 0;
hlock = curr->held_locks + depth;
if (DEBUG_LOCKS_WARN_ON(!class))
return 0;
hlock->class_idx = class - lock_classes + 1;
hlock->acquire_ip = ip;
hlock->instance = lock;
hlock->trylock = trylock;
hlock->read = read;
hlock->check = check;
hlock->hardirqs_off = !!hardirqs_off;
#ifdef CONFIG_LOCK_STAT
hlock->waittime_stamp = 0;
hlock->holdtime_stamp = sched_clock();
#endif
	if (check == 2 && !mark_irqflags(curr, hlock))
		return 0;
	/* mark it as used: */
	if (!mark_lock(curr, hlock, LOCK_USED))
		return 0;
	/*
	 * Calculate the chain hash: it's the combined hash of all the
* lock keys along the dependency chain. We save the hash value
* at every step so that we can get the current hash easily
* after unlock. The chain hash is then used to cache dependency
* results.
*
* The 'key ID' is what is the most compact key value to drive
* the hash, not class->key.
*/
id = class - lock_classes;
if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
return 0;
chain_key = curr->curr_chain_key;
if (!depth) {
if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
return 0;
chain_head = 1;
}
hlock->prev_chain_key = chain_key;
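	/*
	 * If we cross into a different irq context, restart the chain
	 * key, so that lock chains never span process, softirq and
	 * hardirq contexts:
	 */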
if (separate_irq_context(curr, hlock)) {
chain_key = 0;
chain_head = 1;
}
chain_key = iterate_chain_key(chain_key, id);
	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
		return 0;
	curr->curr_chain_key = chain_key;
	curr->lockdep_depth++;
	check_chain_key(curr);
#ifdef CONFIG_DEBUG_LOCKDEP
	if (unlikely(!debug_locks))
		return 0;
#endif
if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
debug_locks_off();
printk("BUG: MAX_LOCK_DEPTH too low!\n");
printk("turning off the locking correctness validator.\n");
return 0;
}
if (unlikely(curr->lockdep_depth > max_lockdep_depth))
max_lockdep_depth = curr->lockdep_depth;
return 1;
}
static int
print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
unsigned long ip)
{
if (!debug_locks_off())
return 0;
if (debug_locks_silent)
return 0;
printk("\n=====================================\n");
printk( "[ BUG: bad unlock balance detected! ]\n");
printk( "-------------------------------------\n");
printk("%s/%d is trying to release lock (",
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
printk(") at:\n");
print_ip_sym(ip);
printk("but there are no more locks to release!\n");
printk("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
return 0;
}
/*
* Common debugging checks for both nested and non-nested unlock:
*/
static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
unsigned long ip)
{
if (unlikely(!debug_locks))
return 0;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
if (curr->lockdep_depth <= 0)
return print_unlock_inbalance_bug(curr, lock, ip);
return 1;
}
static int
__lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
unsigned long ip)
{
struct task_struct *curr = current;
struct held_lock *hlock, *prev_hlock;
struct lock_class *class;
unsigned int depth;
int i;
depth = curr->lockdep_depth;
if (DEBUG_LOCKS_WARN_ON(!depth))
return 0;
prev_hlock = NULL;
for (i = depth-1; i >= 0; i--) {
hlock = curr->held_locks + i;
/*
* We must not cross into another context:
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
if (hlock->instance == lock)
goto found_it;
prev_hlock = hlock;
}
return print_unlock_inbalance_bug(curr, lock, ip);
found_it:
lockdep_init_map(lock, name, key, 0);
class = register_lock_class(lock, subclass, 0);
hlock->class_idx = class - lock_classes + 1;
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
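	/*
	 * Unwind the held-lock stack back to the modified entry, then
	 * re-acquire every lock above it so the chain key and the
	 * dependencies are rebuilt against the new lock class:
	 */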
for (; i < depth; i++) {
hlock = curr->held_locks + i;
if (!__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass, hlock->trylock,
				hlock->read, hlock->check, hlock->hardirqs_off,
				hlock->nest_lock, hlock->acquire_ip))
			return 0;
}
if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
return 0;
return 1;
}
/*
 * Remove the lock from the list of currently held locks in a
* potentially non-nested (out of order) manner. This is a
* relatively rare operation, as all the unlock APIs default
* to nested mode (which uses lock_release()):
*/
static int
lock_release_non_nested(struct task_struct *curr,
struct lockdep_map *lock, unsigned long ip)
{
struct held_lock *hlock, *prev_hlock;
unsigned int depth;
int i;
/*
* Check whether the lock exists in the current stack
* of held locks:
*/
depth = curr->lockdep_depth;
if (DEBUG_LOCKS_WARN_ON(!depth))
return 0;
prev_hlock = NULL;
for (i = depth-1; i >= 0; i--) {
hlock = curr->held_locks + i;
/*
* We must not cross into another context:
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
if (hlock->instance == lock)
goto found_it;
prev_hlock = hlock;
}
return print_unlock_inbalance_bug(curr, lock, ip);
found_it:
/*
* We have the right lock to unlock, 'hlock' points to it.
* Now we remove it from the stack, and add back the other
* entries (if any), recalculating the hash along the way:
*/
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
for (i++; i < depth; i++) {
hlock = curr->held_locks + i;
if (!__lock_acquire(hlock->instance,
				hlock_class(hlock)->subclass, hlock->trylock,
				hlock->read, hlock->check, hlock->hardirqs_off,
				hlock->nest_lock, hlock->acquire_ip))
			return 0;
}
if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
return 0;
return 1;
}