if (softirq_verbose(this->class))
ret = 2;
break;
case LOCK_ENABLED_HARDIRQS:
if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
return 0;
if (!valid_state(curr, this, new_bit,
LOCK_USED_IN_HARDIRQ_READ))
return 0;
/*
* just marked it hardirq-unsafe, check that no hardirq-safe
* lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_HARDIRQ, "hard"))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it hardirq-unsafe, check that no
* hardirq-safe-read lock in the system ever took
* it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
return 0;
#endif
if (hardirq_verbose(this->class))
ret = 2;
break;
case LOCK_ENABLED_SOFTIRQS:
if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
return 0;
if (!valid_state(curr, this, new_bit,
LOCK_USED_IN_SOFTIRQ_READ))
return 0;
/*
* just marked it softirq-unsafe, check that no softirq-safe
* lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_SOFTIRQ, "soft"))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it softirq-unsafe, check that no
* softirq-safe-read lock in the system ever took
* it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
return 0;
#endif
if (softirq_verbose(this->class))
ret = 2;
break;
case LOCK_ENABLED_HARDIRQS_READ:
if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it hardirq-read-unsafe, check that no
* hardirq-safe lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_HARDIRQ, "hard"))
return 0;
#endif
if (hardirq_verbose(this->class))
ret = 2;
break;
case LOCK_ENABLED_SOFTIRQS_READ:
if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it softirq-read-unsafe, check that no
* softirq-safe lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_SOFTIRQ, "soft"))
return 0;
#endif
if (softirq_verbose(this->class))
ret = 2;
break;
default:
WARN_ON(1);
}
return ret;
}
/*
* Mark all held locks with a usage bit:
*/
static int
mark_held_locks(struct task_struct *curr, int hardirq)
{
enum lock_usage_bit usage_bit;
struct held_lock *hlock;
int i;
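	/*
	 * For each held lock pick the ENABLED_* bit that matches the
	 * irq type being enabled and whether the lock is held for read:
	 */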
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
if (hardirq) {
if (hlock->read)
usage_bit = LOCK_ENABLED_HARDIRQS_READ;
else
usage_bit = LOCK_ENABLED_HARDIRQS;
} else {
if (hlock->read)
usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
else
usage_bit = LOCK_ENABLED_SOFTIRQS;
}
if (!mark_lock(curr, hlock, usage_bit))
return 0;
}
return 1;
}
/*
* Debugging helper: via this flag we know that we are in
* 'early bootup code', and will warn about any invalid irqs-on event:
*/
static int early_boot_irqs_enabled;
void early_boot_irqs_off(void)
{
early_boot_irqs_enabled = 0;
}
void early_boot_irqs_on(void)
{
early_boot_irqs_enabled = 1;
}
/*
* Hardirqs will be enabled:
*/
void trace_hardirqs_on_caller(unsigned long a0)
{
struct task_struct *curr = current;
unsigned long ip;
time_hardirqs_on(CALLER_ADDR0, a0);
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
return;
if (unlikely(curr->hardirqs_enabled)) {
debug_atomic_inc(&redundant_hardirqs_on);
return;
}
/* we'll do an OFF -> ON transition: */
curr->hardirqs_enabled = 1;
ip = (unsigned long) __builtin_return_address(0);
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
return;
/*
* We are going to turn hardirqs on, so set the
* usage bit for all held locks:
*/
if (!mark_held_locks(curr, 1))
return;
/*
* If we have softirqs enabled, then set the usage
* bit for all held locks. (disabled hardirqs prevented
* this bit from being set before)
*/
	if (curr->softirqs_enabled)
		if (!mark_held_locks(curr, 0))
			return;

	curr->hardirq_enable_ip = ip;
curr->hardirq_enable_event = ++curr->irq_events;
debug_atomic_inc(&hardirqs_on_events);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
void trace_hardirqs_on(void)
{
trace_hardirqs_on_caller(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
/*
* Hardirqs were disabled:
*/
void trace_hardirqs_off_caller(unsigned long a0)
{
struct task_struct *curr = current;
time_hardirqs_off(CALLER_ADDR0, a0);
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (curr->hardirqs_enabled) {
/*
* We have done an ON -> OFF transition:
*/
curr->hardirqs_enabled = 0;
curr->hardirq_disable_ip = _RET_IP_;
curr->hardirq_disable_event = ++curr->irq_events;
debug_atomic_inc(&hardirqs_off_events);
} else
debug_atomic_inc(&redundant_hardirqs_off);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
void trace_hardirqs_off(void)
{
trace_hardirqs_off_caller(CALLER_ADDR0);
}
}
EXPORT_SYMBOL(trace_hardirqs_off);
/*
* Softirqs will be enabled:
*/
void trace_softirqs_on(unsigned long ip)
{
struct task_struct *curr = current;
if (unlikely(!debug_locks))
return;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (curr->softirqs_enabled) {
debug_atomic_inc(&redundant_softirqs_on);
return;
}
/*
* We'll do an OFF -> ON transition:
*/
curr->softirqs_enabled = 1;
curr->softirq_enable_ip = ip;
curr->softirq_enable_event = ++curr->irq_events;
debug_atomic_inc(&softirqs_on_events);
/*
* We are going to turn softirqs on, so set the
* usage bit for all held locks, if hardirqs are
* enabled too:
*/
if (curr->hardirqs_enabled)
mark_held_locks(curr, 0);
}
/*
* Softirqs were disabled:
*/
void trace_softirqs_off(unsigned long ip)
{
struct task_struct *curr = current;
if (unlikely(!debug_locks))
return;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (curr->softirqs_enabled) {
/*
* We have done an ON -> OFF transition:
*/
curr->softirqs_enabled = 0;
curr->softirq_disable_ip = ip;
curr->softirq_disable_event = ++curr->irq_events;
debug_atomic_inc(&softirqs_off_events);
DEBUG_LOCKS_WARN_ON(!softirq_count());
} else
debug_atomic_inc(&redundant_softirqs_off);
}
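/*
 * Mark the irq-usage bits of a freshly acquired lock, based on the
 * current hardirq/softirq context and on which irqs are enabled:
 */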
static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
{
/*
* If non-trylock use in a hardirq or softirq context, then
* mark the lock as used in these contexts:
*/
if (!hlock->trylock) {
if (hlock->read) {
if (curr->hardirq_context)
if (!mark_lock(curr, hlock,
LOCK_USED_IN_HARDIRQ_READ))
return 0;
if (curr->softirq_context)
if (!mark_lock(curr, hlock,
LOCK_USED_IN_SOFTIRQ_READ))
return 0;
} else {
if (curr->hardirq_context)
if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
return 0;
if (curr->softirq_context)
if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
return 0;
}
}
if (!hlock->hardirqs_off) {
if (hlock->read) {
if (!mark_lock(curr, hlock,
LOCK_ENABLED_HARDIRQS_READ))
return 0;
if (curr->softirqs_enabled)
if (!mark_lock(curr, hlock,
LOCK_ENABLED_SOFTIRQS_READ))
return 0;
} else {
if (!mark_lock(curr, hlock,
LOCK_ENABLED_HARDIRQS))
return 0;
if (curr->softirqs_enabled)
if (!mark_lock(curr, hlock,
LOCK_ENABLED_SOFTIRQS))
return 0;
}
}
return 1;
}
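/*
 * Record whether this lock is taken in a different hardirq/softirq
 * context than the previously held lock (the chain key is reset at
 * such context boundaries):
 */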
static int separate_irq_context(struct task_struct *curr,
struct held_lock *hlock)
{
unsigned int depth = curr->lockdep_depth;
/*
* Keep track of points where we cross into an interrupt context:
*/
hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
curr->softirq_context;
if (depth) {
struct held_lock *prev_hlock;
prev_hlock = curr->held_locks + depth-1;
/*
* If we cross into another context, reset the
* hash key (this also prevents the checking and the
* adding of the dependency to 'prev'):
*/
if (prev_hlock->irq_context != hlock->irq_context)
return 1;
}
	return 0;
}

#else

static inline
int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	WARN_ON(1);
	return 1;
}

static inline int mark_irqflags(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 1;
}

static inline int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 0;
}

#endif

/*
 * Mark a lock with a usage bit, and validate the state transition:
 */
static int mark_lock(struct task_struct *curr, struct held_lock *this,
		     enum lock_usage_bit new_bit)
{
	unsigned int new_mask = 1 << new_bit, ret = 1;

	/*
	 * If already set then do not dirty the cacheline,
	 * nor do any checks:
	 */
	if (likely(this->class->usage_mask & new_mask))
		return 1;

	if (!graph_lock())
		return 0;
	/*
	 * Make sure we didn't race:
	 */
	if (unlikely(this->class->usage_mask & new_mask)) {
		graph_unlock();
		return 1;
	}

	this->class->usage_mask |= new_mask;

	if (!save_trace(this->class->usage_traces + new_bit))
		return 0;
switch (new_bit) {
case LOCK_USED_IN_HARDIRQ:
case LOCK_USED_IN_SOFTIRQ:
case LOCK_USED_IN_HARDIRQ_READ:
case LOCK_USED_IN_SOFTIRQ_READ:
case LOCK_ENABLED_HARDIRQS:
case LOCK_ENABLED_SOFTIRQS:
case LOCK_ENABLED_HARDIRQS_READ:
case LOCK_ENABLED_SOFTIRQS_READ:
ret = mark_lock_irq(curr, this, new_bit);
if (!ret)
return 0;
break;
case LOCK_USED:
debug_atomic_dec(&nr_unused_locks);
break;
default:
if (!debug_locks_off_graph_unlock())
return 0;
WARN_ON(1);
return 0;
}
graph_unlock();
/*
* We must printk outside of the graph_lock:
*/
if (ret == 2) {
printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
print_lock(this);
print_irqtrace_events(curr);
dump_stack();
}
return ret;
}
/*
* Initialize a lock instance's lock-class mapping info:
*/
void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass)
{
if (unlikely(!debug_locks))
return;
if (DEBUG_LOCKS_WARN_ON(!key))
return;
if (DEBUG_LOCKS_WARN_ON(!name))
return;
/*
* Sanity check, the lock-class key must be persistent:
*/
if (!static_obj(key)) {
printk("BUG: key %p not in .data!\n", key);
DEBUG_LOCKS_WARN_ON(1);
return;
}
lock->name = name;
lock->key = key;
lock->class_cache = NULL;
#ifdef CONFIG_LOCK_STAT
lock->cpu = raw_smp_processor_id();
#endif
if (subclass)
register_lock_class(lock, subclass, 1);
}
EXPORT_SYMBOL_GPL(lockdep_init_map);
/*
* This gets called for every mutex_lock*()/spin_lock*() operation.
* We maintain the dependency maps and validate the locking attempt:
*/
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check, int hardirqs_off,
unsigned long ip)
{
struct task_struct *curr = current;
struct lock_class *class = NULL;
struct held_lock *hlock;
unsigned int depth, id;
int chain_head = 0;
u64 chain_key;
if (!prove_locking)
check = 1;
if (unlikely(!debug_locks))
return 0;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
debug_locks_off();
printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
printk("turning off the locking correctness validator.\n");
return 0;
}
if (!subclass)
class = lock->class_cache;
	/*
	 * Not cached yet or subclass?
	 */
	if (unlikely(!class)) {
		class = register_lock_class(lock, subclass, 0);
if (!class)
return 0;
}
debug_atomic_inc((atomic_t *)&class->ops);
if (very_verbose(class)) {
printk("\nacquire class [%p] %s", class->key, class->name);
if (class->name_version > 1)
printk("#%d", class->name_version);
printk("\n");
dump_stack();
}
/*
* Add the lock to the list of currently held locks.
* (we dont increase the depth just yet, up until the
* dependency checks are done)
*/
depth = curr->lockdep_depth;
if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
return 0;
hlock = curr->held_locks + depth;
hlock->class = class;
hlock->acquire_ip = ip;
hlock->instance = lock;
hlock->trylock = trylock;
hlock->read = read;
hlock->check = check;
hlock->hardirqs_off = hardirqs_off;
#ifdef CONFIG_LOCK_STAT
hlock->waittime_stamp = 0;
hlock->holdtime_stamp = sched_clock();
#endif
if (check == 2 && !mark_irqflags(curr, hlock))
return 0;
	if (!mark_lock(curr, hlock, LOCK_USED))
		return 0;

	/*
	 * Calculate the chain hash: it's the combined hash of all the
* lock keys along the dependency chain. We save the hash value
* at every step so that we can get the current hash easily
* after unlock. The chain hash is then used to cache dependency
* results.
*
* The 'key ID' is what is the most compact key value to drive
* the hash, not class->key.
*/
id = class - lock_classes;
if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
return 0;
chain_key = curr->curr_chain_key;
if (!depth) {
if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
return 0;
chain_head = 1;
}
hlock->prev_chain_key = chain_key;
if (separate_irq_context(curr, hlock)) {
chain_key = 0;
chain_head = 1;
}
chain_key = iterate_chain_key(chain_key, id);
	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
		return 0;

	curr->curr_chain_key = chain_key;
	curr->lockdep_depth++;
	check_chain_key(curr);
#ifdef CONFIG_DEBUG_LOCKDEP
if (unlikely(!debug_locks))
return 0;
#endif
if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
debug_locks_off();
printk("BUG: MAX_LOCK_DEPTH too low!\n");
printk("turning off the locking correctness validator.\n");
return 0;
}
if (unlikely(curr->lockdep_depth > max_lockdep_depth))
max_lockdep_depth = curr->lockdep_depth;
return 1;
}
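/*
 * Print an "unlock balance" bug report: the lock being released is
 * not (or no longer) held by the current task:
 */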
static int
print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
unsigned long ip)
{
if (!debug_locks_off())
return 0;
if (debug_locks_silent)
return 0;
printk("\n=====================================\n");
printk( "[ BUG: bad unlock balance detected! ]\n");
printk( "-------------------------------------\n");
printk("%s/%d is trying to release lock (",
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
printk(") at:\n");
print_ip_sym(ip);
printk("but there are no more locks to release!\n");
printk("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
return 0;
}
/*
* Common debugging checks for both nested and non-nested unlock:
*/
static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
unsigned long ip)
{
if (unlikely(!debug_locks))
return 0;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
if (curr->lockdep_depth <= 0)
return print_unlock_inbalance_bug(curr, lock, ip);
return 1;
}
/*
 * Remove the lock from the list of currently held locks in a
* potentially non-nested (out of order) manner. This is a
* relatively rare operation, as all the unlock APIs default
* to nested mode (which uses lock_release()):
*/
static int
lock_release_non_nested(struct task_struct *curr,
struct lockdep_map *lock, unsigned long ip)
{
struct held_lock *hlock, *prev_hlock;
unsigned int depth;
int i;
/*
* Check whether the lock exists in the current stack
* of held locks:
*/
depth = curr->lockdep_depth;
if (DEBUG_LOCKS_WARN_ON(!depth))
return 0;
prev_hlock = NULL;
for (i = depth-1; i >= 0; i--) {
hlock = curr->held_locks + i;
/*
* We must not cross into another context:
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
if (hlock->instance == lock)
goto found_it;
prev_hlock = hlock;
}
return print_unlock_inbalance_bug(curr, lock, ip);
found_it:
	lock_release_holdtime(hlock);
/*
* We have the right lock to unlock, 'hlock' points to it.
* Now we remove it from the stack, and add back the other
* entries (if any), recalculating the hash along the way:
*/
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
for (i++; i < depth; i++) {
hlock = curr->held_locks + i;
if (!__lock_acquire(hlock->instance,
hlock->class->subclass, hlock->trylock,
hlock->read, hlock->check, hlock->hardirqs_off,
hlock->acquire_ip))
return 0;
}
if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
return 0;
return 1;
}
/*
 * Remove the lock from the list of currently held locks - this gets
* called on mutex_unlock()/spin_unlock*() (or on a failed
* mutex_lock_interruptible()). This is done for unlocks that nest
* perfectly. (i.e. the current top of the lock-stack is unlocked)
*/
static int lock_release_nested(struct task_struct *curr,
struct lockdep_map *lock, unsigned long ip)
{
struct held_lock *hlock;
unsigned int depth;
/*
* Pop off the top of the lock stack:
*/
depth = curr->lockdep_depth - 1;
hlock = curr->held_locks + depth;
/*
* Is the unlock non-nested:
*/
if (hlock->instance != lock)
return lock_release_non_nested(curr, lock, ip);
curr->lockdep_depth--;
if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
return 0;
curr->curr_chain_key = hlock->prev_chain_key;
	lock_release_holdtime(hlock);
#ifdef CONFIG_DEBUG_LOCKDEP
hlock->prev_chain_key = 0;
hlock->class = NULL;
hlock->acquire_ip = 0;
hlock->irq_context = 0;
#endif
return 1;
}
/*
 * Remove the lock from the list of currently held locks - this gets
* called on mutex_unlock()/spin_unlock*() (or on a failed
* mutex_lock_interruptible()). This is done for unlocks that nest
* perfectly. (i.e. the current top of the lock-stack is unlocked)
*/
static void
__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
{
struct task_struct *curr = current;
if (!check_unlock(curr, lock, ip))
return;
if (nested) {
if (!lock_release_nested(curr, lock, ip))
return;
} else {
if (!lock_release_non_nested(curr, lock, ip))
return;
}
check_chain_key(curr);
}
/*
* Check whether we follow the irq-flags state precisely:
*/
static void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
defined(CONFIG_TRACE_IRQFLAGS)
if (irqs_disabled_flags(flags)) {
if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
printk("possible reason: unannotated irqs-off.\n");
}
} else {
if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
printk("possible reason: unannotated irqs-on.\n");
}
}
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
* check if not in hardirq contexts:
*/
if (!hardirq_count()) {
if (softirq_count())
DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
else
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
if (!debug_locks)
print_irqtrace_events(current);
#endif
}
/*
* We are not always called with irqs disabled - do that here,
* and also avoid lockdep recursion:
*/
void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
		  int trylock, int read, int check, unsigned long ip)
{
	unsigned long flags;

	if (unlikely(!lock_stat && !prove_locking))
return;
if (unlikely(current->lockdep_recursion))
return;
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
__lock_acquire(lock, subclass, trylock, read, check,
irqs_disabled_flags(flags), ip);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);
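/*
 * Release a lock: like lock_acquire(), disable irqs and guard against
 * lockdep recursion before doing the real work in __lock_release():
 */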
void lock_release(struct lockdep_map *lock, int nested,
		  unsigned long ip)
{
	unsigned long flags;

	if (unlikely(!lock_stat && !prove_locking))
return;
if (unlikely(current->lockdep_recursion))
return;
raw_local_irq_save(flags);
check_flags(flags);
current->lockdep_recursion = 1;
__lock_release(lock, nested, ip);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_release);
#ifdef CONFIG_LOCK_STAT
static int
print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
unsigned long ip)
{
if (!debug_locks_off())
return 0;
if (debug_locks_silent)
return 0;
printk("\n=================================\n");
printk( "[ BUG: bad contention detected! ]\n");
printk( "---------------------------------\n");
printk("%s/%d is trying to contend lock (",
curr->comm, task_pid_nr(curr));
print_lockdep_cache(lock);
printk(") at:\n");
print_ip_sym(ip);
printk("but there are no locks held!\n");
printk("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
return 0;
}
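/*
 * A lock was contended: find it on the held-lock stack, timestamp the
 * start of the wait and account a contention (and possible cacheline
 * bounce) event against its lock class:
 */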
static void
__lock_contended(struct lockdep_map *lock, unsigned long ip)
{
struct task_struct *curr = current;
struct held_lock *hlock, *prev_hlock;
struct lock_class_stats *stats;
unsigned int depth;
int i, point;
depth = curr->lockdep_depth;
if (DEBUG_LOCKS_WARN_ON(!depth))
return;
prev_hlock = NULL;
for (i = depth-1; i >= 0; i--) {
hlock = curr->held_locks + i;
/*
* We must not cross into another context:
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
if (hlock->instance == lock)
goto found_it;
prev_hlock = hlock;
}
print_lock_contention_bug(curr, lock, ip);
return;
found_it:
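	/* Start timing the wait and map the call site to a contention point: */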
hlock->waittime_stamp = sched_clock();
point = lock_contention_point(hlock->class, ip);
stats = get_lock_stats(hlock->class);
if (point < ARRAY_SIZE(stats->contention_point))
		stats->contention_point[point]++;
if (lock->cpu != smp_processor_id())
stats->bounces[bounce_contended + !!hlock->read]++;
put_lock_stats(stats);
}
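/*
 * A contended lock was finally acquired: account the wait-time that
 * elapsed since __lock_contended() against its lock class:
 */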
static void
__lock_acquired(struct lockdep_map *lock)
{
struct task_struct *curr = current;
struct held_lock *hlock, *prev_hlock;
struct lock_class_stats *stats;
unsigned int depth;
u64 now;
	s64 waittime = 0;
	int i, cpu;
depth = curr->lockdep_depth;
if (DEBUG_LOCKS_WARN_ON(!depth))
return;
prev_hlock = NULL;
for (i = depth-1; i >= 0; i--) {
hlock = curr->held_locks + i;
/*
* We must not cross into another context:
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
if (hlock->instance == lock)
goto found_it;
prev_hlock = hlock;
}
print_lock_contention_bug(curr, lock, _RET_IP_);
return;
found_it:
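	/* Only account a wait if __lock_contended() stamped one: */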
cpu = smp_processor_id();
if (hlock->waittime_stamp) {
now = sched_clock();
waittime = now - hlock->waittime_stamp;
hlock->holdtime_stamp = now;
}
stats = get_lock_stats(hlock->class);
if (waittime) {
if (hlock->read)
lock_time_inc(&stats->read_waittime, waittime);
else
lock_time_inc(&stats->write_waittime, waittime);
}
if (lock->cpu != cpu)
stats->bounces[bounce_acquired + !!hlock->read]++;
	put_lock_stats(stats);

	lock->cpu = cpu;
}
void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
if (unlikely(!lock_stat))