static noinline int
print_circular_bug_entry(struct lock_list *target, unsigned int depth)
{
if (debug_locks_silent)
return 0;
printk("\n-> #%u", depth);
print_lock_name(target->class);
printk(":\n");
print_stack_trace(&target->trace, 6);
return 0;
}
/*
* When a circular dependency is detected, print the
* header first:
*/
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth)
{
struct task_struct *curr = current;
if (debug_locks_silent)
return 0;
printk("\n=======================================================\n");
printk( "[ INFO: possible circular locking dependency detected ]\n");
print_kernel_version();
printk( "-------------------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
print_lock(check_source);
printk("\nbut task is already holding lock:\n");
print_lock(check_target);
printk("\nwhich lock already depends on the new lock.\n\n");
printk("\nthe existing dependency chain (in reverse order) is:\n");
print_circular_bug_entry(entry, depth);
return 0;
}
static inline int class_equal(struct lock_list *entry, void *data)
{
return entry->class == data;
}
static noinline int print_circular_bug(void)
{
struct task_struct *curr = current;
struct lock_list this;
struct lock_list *target;
struct lock_list *parent;
int result;
unsigned long depth;
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
this.class = hlock_class(check_source);
if (!save_trace(&this.trace))
return 0;
result = __bfs_forward(&this,
hlock_class(check_target),
class_equal,
&target);
if (result) {
printk("\n%s:search shortest path failed:%d\n", __func__,
result);
return 0;
}
depth = get_lock_depth(target);
print_circular_bug_header(target, depth);
parent = get_lock_parent(target);
while (parent) {
print_circular_bug_entry(parent, --depth);
parent = get_lock_parent(parent);
}
}
printk("\nother info that might help us debug this:\n\n");
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
return 0;
}
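/*
 * Illustrative sketch, not lockdep code: the lock names and functions below
 * are hypothetical. This is the classic AB-BA ordering that the machinery
 * above reports. Once path_one() has recorded lock_a -> lock_b, the attempt
 * in path_two() to add lock_b -> lock_a closes a cycle, check_noncircular()
 * returns 2, and print_circular_bug() prints the chain.
 */
static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void path_one(void)
{
	spin_lock(&lock_a);
	spin_lock(&lock_b);	/* records the dependency lock_a -> lock_b */
	spin_unlock(&lock_b);
	spin_unlock(&lock_a);
}

static void path_two(void)
{
	spin_lock(&lock_b);
	spin_lock(&lock_a);	/* would add lock_b -> lock_a: circular */
	spin_unlock(&lock_a);
	spin_unlock(&lock_b);
}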
#define RECURSION_LIMIT 40
static int noinline print_infinite_recursion_bug(void)
{
if (!debug_locks_off_graph_unlock())
return 0;
WARN_ON(1);
return 0;
}
unsigned long __lockdep_count_forward_deps(struct lock_class *class,
unsigned int depth)
{
struct lock_list *entry;
unsigned long ret = 1;
if (lockdep_dependency_visit(class, depth))
return 0;
/*
* Recurse this class's dependency list:
*/
list_for_each_entry(entry, &class->locks_after, entry)
ret += __lockdep_count_forward_deps(entry->class, depth + 1);
return ret;
}
unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
unsigned long ret, flags;
local_irq_save(flags);
__raw_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(class, 0);
__raw_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
return ret;
}
unsigned long __lockdep_count_backward_deps(struct lock_class *class,
unsigned int depth)
{
struct lock_list *entry;
unsigned long ret = 1;
if (lockdep_dependency_visit(class, depth))
return 0;
/*
* Recurse this class's dependency list:
*/
list_for_each_entry(entry, &class->locks_before, entry)
ret += __lockdep_count_backward_deps(entry->class, depth + 1);
return ret;
}
unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
unsigned long ret, flags;
local_irq_save(flags);
__raw_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(class, 0);
__raw_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
return ret;
}
/*
* Prove that the dependency graph starting at <entry> can not
* lead to <target>. Print an error and return 0 if it does.
*/
static noinline int
check_noncircular(struct lock_class *source, unsigned int depth)
{
struct lock_list *entry;
if (lockdep_dependency_visit(source, depth))
return 1;
debug_atomic_inc(&nr_cyclic_check_recursions);
if (depth > max_recursion_depth)
max_recursion_depth = depth;
if (depth >= RECURSION_LIMIT)
return print_infinite_recursion_bug();
/*
* Check this lock's dependency list:
*/
list_for_each_entry(entry, &source->locks_after, entry) {
if (entry->class == hlock_class(check_target))
return 2;
debug_atomic_inc(&nr_cyclic_checks);
if (check_noncircular(entry->class, depth+1) == 2)
return 2;
}
return 1;
}
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
* Forwards and backwards subgraph searching, for the purposes of
* proving that two subgraphs can be connected by a new dependency
* without creating any illegal irq-safe -> irq-unsafe lock dependency.
*/
static enum lock_usage_bit find_usage_bit;
static struct lock_class *forwards_match, *backwards_match;
/*
* Find a node in the forwards-direction dependency sub-graph starting
* at <source> that matches <find_usage_bit>.
*
* Return 2 if such a node exists in the subgraph, and put that node
* into <forwards_match>.
*
* Return 1 otherwise and keep <forwards_match> unchanged.
* Return 0 on error.
*/
static noinline int
find_usage_forwards(struct lock_class *source, unsigned int depth)
{
struct lock_list *entry;
int ret;
if (lockdep_dependency_visit(source, depth))
return 1;
if (depth > max_recursion_depth)
max_recursion_depth = depth;
if (depth >= RECURSION_LIMIT)
return print_infinite_recursion_bug();
debug_atomic_inc(&nr_find_usage_forwards_checks);
if (source->usage_mask & (1 << find_usage_bit)) {
forwards_match = source;
return 2;
}
/*
* Check this lock's dependency list:
*/
list_for_each_entry(entry, &source->locks_after, entry) {
debug_atomic_inc(&nr_find_usage_forwards_recursions);
ret = find_usage_forwards(entry->class, depth+1);
if (ret == 2 || ret == 0)
return ret;
}
return 1;
}
/*
* Find a node in the backwards-direction dependency sub-graph starting
* at <source> that matches <find_usage_bit>.
*
* Return 2 if such a node exists in the subgraph, and put that node
* into <backwards_match>.
*
* Return 1 otherwise and keep <backwards_match> unchanged.
* Return 0 on error.
*/
static noinline int
find_usage_backwards(struct lock_class *source, unsigned int depth)
{
struct lock_list *entry;
int ret;
if (lockdep_dependency_visit(source, depth))
return 1;
if (!__raw_spin_is_locked(&lockdep_lock))
return DEBUG_LOCKS_WARN_ON(1);
if (depth > max_recursion_depth)
max_recursion_depth = depth;
if (depth >= RECURSION_LIMIT)
return print_infinite_recursion_bug();
debug_atomic_inc(&nr_find_usage_backwards_checks);
if (source->usage_mask & (1 << find_usage_bit)) {
backwards_match = source;
return 2;
}
if (!source && debug_locks_off_graph_unlock()) {
WARN_ON(1);
return 0;
}
/*
* Check this lock's dependency list:
*/
list_for_each_entry(entry, &source->locks_before, entry) {
debug_atomic_inc(&nr_find_usage_backwards_recursions);
ret = find_usage_backwards(entry->class, depth+1);
if (ret == 2 || ret == 0)
return ret;
}
return 1;
}
static int
print_bad_irq_dependency(struct task_struct *curr,
struct held_lock *prev,
struct held_lock *next,
enum lock_usage_bit bit1,
enum lock_usage_bit bit2,
const char *irqclass)
{
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n======================================================\n");
printk( "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
irqclass, irqclass);
printk( "------------------------------------------------------\n");
printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
curr->comm, task_pid_nr(curr),
curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
curr->hardirqs_enabled,
curr->softirqs_enabled);
print_lock(next);
printk("\nand this task is already holding:\n");
print_lock(prev);
printk("which would create a new lock dependency:\n");
print_lock_name(hlock_class(prev));
printk(" ->");
print_lock_name(hlock_class(next));
printk("\n");
printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
irqclass);
print_lock_name(backwards_match);
printk("\n... which became %s-irq-safe at:\n", irqclass);
print_stack_trace(backwards_match->usage_traces + bit1, 1);
printk("\nto a %s-irq-unsafe lock:\n", irqclass);
print_lock_name(forwards_match);
printk("\n... which became %s-irq-unsafe at:\n", irqclass);
printk("...");
print_stack_trace(forwards_match->usage_traces + bit2, 1);
printk("\nother info that might help us debug this:\n\n");
lockdep_print_held_locks(curr);
printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
print_lock_dependencies(backwards_match, 0);
printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
print_lock_dependencies(forwards_match, 0);
printk("\nstack backtrace:\n");
dump_stack();
return 0;
}
static int
check_usage(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next, enum lock_usage_bit bit_backwards,
enum lock_usage_bit bit_forwards, const char *irqclass)
{
int ret;
find_usage_bit = bit_backwards;
/* fills in <backwards_match> */
ret = find_usage_backwards(hlock_class(prev), 0);
if (!ret || ret == 1)
return ret;
find_usage_bit = bit_forwards;
ret = find_usage_forwards(hlock_class(next), 0);
if (!ret || ret == 1)
return ret;
/* ret == 2 */
return print_bad_irq_dependency(curr, prev, next,
bit_backwards, bit_forwards, irqclass);
}
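/*
 * Illustrative sketch, not lockdep code (locks and functions below are
 * hypothetical, and assume <linux/spinlock.h> and <linux/interrupt.h>):
 * the inversion check_usage() guards against. irq_lock becomes hardirq-safe
 * because a hardirq handler takes it; plain_lock stays hardirq-unsafe
 * because it is taken with interrupts enabled. Adding the dependency
 * irq_lock -> plain_lock would allow:
 *
 *	CPU0				CPU1
 *	spin_lock(&plain_lock);		spin_lock_irq(&irq_lock);
 *	<hardirq>			spin_lock(&plain_lock);
 *	  spin_lock(&irq_lock);
 */
static DEFINE_SPINLOCK(irq_lock);
static DEFINE_SPINLOCK(plain_lock);

static irqreturn_t sample_irq_handler(int irq, void *dev)
{
	spin_lock(&irq_lock);		/* marks irq_lock hardirq-safe */
	spin_unlock(&irq_lock);
	return IRQ_HANDLED;
}

static void sample_process_context(void)
{
	spin_lock(&plain_lock);		/* irqs on here: plain_lock is hardirq-unsafe */
	spin_unlock(&plain_lock);

	spin_lock_irq(&irq_lock);
	spin_lock(&plain_lock);		/* new dependency: irq-safe -> irq-unsafe */
	spin_unlock(&plain_lock);
	spin_unlock_irq(&irq_lock);
}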
static const char *state_names[] = {
#define LOCKDEP_STATE(__STATE) \
__stringify(__STATE),
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};
static const char *state_rnames[] = {
#define LOCKDEP_STATE(__STATE) \
__stringify(__STATE)"-READ",
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};
static inline const char *state_name(enum lock_usage_bit bit)
{
return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
}
static int exclusive_bit(int new_bit)
{
/*
* USED_IN
* USED_IN_READ
* ENABLED
* ENABLED_READ
*
* bit 0 - write/read
* bit 1 - used_in/enabled
* bit 2+ state
*/
int state = new_bit & ~3;
int dir = new_bit & 2;
/*
* keep state, bit flip the direction and strip read.
*/
return state | (dir ^ 2);
}
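/*
 * Worked example of exclusive_bit(), using the enumeration order described
 * in the comment above (USED_IN, USED_IN_READ, ENABLED, ENABLED_READ per
 * state, so the first state occupies the values 0..3):
 *
 *	exclusive_bit(0) = (0 & ~3) | ((0 & 2) ^ 2) = 2   USED_IN      -> ENABLED
 *	exclusive_bit(1) = (1 & ~3) | ((1 & 2) ^ 2) = 2   USED_IN_READ -> ENABLED
 *	exclusive_bit(2) = (2 & ~3) | ((2 & 2) ^ 2) = 0   ENABLED      -> USED_IN
 *	exclusive_bit(3) = (3 & ~3) | ((3 & 2) ^ 2) = 0   ENABLED_READ -> USED_IN
 *
 * i.e. the direction bit is flipped and the read bit dropped, pairing each
 * usage with the usage that must not appear on the other end of the new
 * dependency.
 */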
static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next, enum lock_usage_bit bit)
{
/*
* Prove that the new dependency does not connect a hardirq-safe
* lock with a hardirq-unsafe lock - to achieve this we search
* the backwards-subgraph starting at <prev>, and the
* forwards-subgraph starting at <next>:
*/
if (!check_usage(curr, prev, next, bit,
exclusive_bit(bit), state_name(bit)))
return 0;
bit++; /* _READ */
/*
* Prove that the new dependency does not connect a hardirq-safe-read
* lock with a hardirq-unsafe lock - to achieve this we search
* the backwards-subgraph starting at <prev>, and the
* forwards-subgraph starting at <next>:
*/
if (!check_usage(curr, prev, next, bit,
exclusive_bit(bit), state_name(bit)))
return 0;
return 1;
}
static int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
{
#define LOCKDEP_STATE(__STATE) \
if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
return 0;
#include "lockdep_states.h"
#undef LOCKDEP_STATE
return 1;
}
static void inc_chains(void)
{
if (current->hardirq_context)
nr_hardirq_chains++;
else {
if (current->softirq_context)
nr_softirq_chains++;
else
nr_process_chains++;
}
}
#else
static inline int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
{
return 1;
}
static inline void inc_chains(void)
{
nr_process_chains++;
}
#endif
static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
{
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=============================================\n");
printk( "[ INFO: possible recursive locking detected ]\n");
printk( "---------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
print_lock(next);
printk("\nbut task is already holding lock:\n");
print_lock(prev);
printk("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
return 0;
}
/*
* Check whether we are holding such a class already.
*
* (Note that this has to be done separately, because the graph cannot
* detect such classes of deadlocks.)
*
* Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
*/
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
struct lockdep_map *next_instance, int read)
{
struct held_lock *prev;
struct held_lock *nest = NULL;
int i;
for (i = 0; i < curr->lockdep_depth; i++) {
prev = curr->held_locks + i;
if (prev->instance == next->nest_lock)
nest = prev;
if (hlock_class(prev) != hlock_class(next))
continue;
/*
* Allow read-after-read recursion of the same
* lock class (i.e. read_lock(lock)+read_lock(lock)):
*/
if ((read == 2) && prev->read)
return 2;
/*
* We're holding the nest_lock, which serializes this lock's
* nesting behaviour.
*/
if (nest)
return 2;
return print_deadlock_bug(curr, prev, next);
}
return 1;
}
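/*
 * Illustrative sketch, not lockdep code (lock names and functions are
 * hypothetical): the self-deadlock check_deadlock() reports, and the
 * read-after-read recursion it deliberately allows.
 */
static DEFINE_SPINLOCK(self_lock);
static DEFINE_RWLOCK(shared_rwlock);

static void bad_self_deadlock(void)
{
	spin_lock(&self_lock);
	spin_lock(&self_lock);		/* same class already held: reported */
	spin_unlock(&self_lock);
	spin_unlock(&self_lock);
}

static void ok_read_recursion(void)
{
	read_lock(&shared_rwlock);
	read_lock(&shared_rwlock);	/* read == 2 && prev->read: allowed */
	read_unlock(&shared_rwlock);
	read_unlock(&shared_rwlock);
}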
/*
* There was a chain-cache miss, and we are about to add a new dependency
* to a previous lock. We recursively validate the following rules:
*
* - would the adding of the <prev> -> <next> dependency create a
* circular dependency in the graph? [== circular deadlock]
*
* - does the new prev->next dependency connect any hardirq-safe lock
* (in the full backwards-subgraph starting at <prev>) with any
* hardirq-unsafe lock (in the full forwards-subgraph starting at
* <next>)? [== illegal lock inversion with hardirq contexts]
*
* - does the new prev->next dependency connect any softirq-safe lock
* (in the full backwards-subgraph starting at <prev>) with any
* softirq-unsafe lock (in the full forwards-subgraph starting at
* <next>)? [== illegal lock inversion with softirq contexts]
*
* any of these scenarios could lead to a deadlock.
*
* Then if all the validations pass, we add the forwards and backwards
* dependency.
*/
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next, int distance)
{
struct lock_list *entry;
int ret;
/*
* Prove that the new <prev> -> <next> dependency would not
* create a circular dependency in the graph. (We do this by
* forward-recursing into the graph starting at <next>, and
* checking whether we can reach <prev>.)
*
* We are using global variables to control the recursion, to
* keep the stackframe size of the recursive functions low:
*/
check_source = next;
check_target = prev;
if (check_noncircular(hlock_class(next), 0) == 2)
return print_circular_bug();
if (!check_prev_add_irq(curr, prev, next))
return 0;
/*
* For recursive read-locks we do all the dependency checks,
* but we dont store read-triggered dependencies (only
* write-triggered dependencies). This ensures that only the
* write-side dependencies matter, and that if for example a
* write-lock never takes any other locks, then the reads are
* equivalent to a NOP.
*/
if (next->read == 2 || prev->read == 2)
return 1;
/*
* Is the <prev> -> <next> dependency already present?
*
* (this may occur even though this is a new chain: consider
* e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
* chains - the second one will be new, but L1 already has
* L2 added to its dependency list, due to the first chain.)
*/
list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
if (entry->class == hlock_class(next)) {
if (distance == 1)
entry->distance = 1;
return 2;
}
}
/*
* Ok, all validations passed, add the new lock
* to the previous lock's dependency list:
*/
ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
&hlock_class(prev)->locks_after,
next->acquire_ip, distance);
if (!ret)
return 0;
ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
&hlock_class(next)->locks_before,
next->acquire_ip, distance);
if (!ret)
return 0;
if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
graph_unlock();
printk("\n new dependency: ");
print_lock_name(hlock_class(prev));
printk(" => ");
print_lock_name(hlock_class(next));
printk("\n");
return graph_lock();
}
return 1;
}
/*
* Add the dependency to all directly-previous locks that are 'relevant'.
* The ones that are relevant are (in increasing distance from curr):
* all consecutive trylock entries and the final non-trylock entry - or
* the end of this context's lock-chain - whichever comes first.
*/
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
int depth = curr->lockdep_depth;
struct held_lock *hlock;
/*
* Debugging checks.
*
* Depth must not be zero for a non-head lock:
*/
if (!depth)
goto out_bug;
/*
* At least two relevant locks must exist for this
* to be a head:
*/
if (curr->held_locks[depth].irq_context !=
curr->held_locks[depth-1].irq_context)
goto out_bug;
for (;;) {
int distance = curr->lockdep_depth - depth + 1;
hlock = curr->held_locks + depth-1;
/*
* Only non-recursive-read entries get new dependencies
* added:
*/
if (hlock->read != 2) {
if (!check_prev_add(curr, hlock, next, distance))
return 0;
/*
* Stop after the first non-trylock entry,
* as non-trylock entries have added their
* own direct dependencies already, so this
* lock is connected to them indirectly:
*/
if (!hlock->trylock)
break;
}
depth--;
/*
* End of lock-stack?
*/
if (!depth)
break;
/*
* Stop the search if we cross into another context:
*/
if (curr->held_locks[depth].irq_context !=
curr->held_locks[depth-1].irq_context)
break;
}
return 1;
out_bug:
if (!debug_locks_off_graph_unlock())
return 0;
WARN_ON(1);
return 0;
}
unsigned long nr_lock_chains;
struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
int nr_chain_hlocks;
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
{
return lock_classes + chain_hlocks[chain->base + i];
}
/*
* Look up a dependency chain. If the key is not present yet then
* add it and return 1 - in this case the new dependency chain is
* validated. If the key is already hashed, return 0.
* (On return with 1 graph_lock is held.)
*/
static inline int lookup_chain_cache(struct task_struct *curr,
struct held_lock *hlock,
u64 chain_key)
{
struct lock_class *class = hlock_class(hlock);
struct list_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
struct held_lock *hlock_curr, *hlock_next;
int i, j, n, cn;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
/*
* We can walk it lock-free, because entries only get added
* to the hash:
*/
list_for_each_entry(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
cache_hit:
debug_atomic_inc(&chain_lookup_hits);
if (very_verbose(class))
printk("\nhash chain already cached, key: "
"%016Lx tail class: [%p] %s\n",
(unsigned long long)chain_key,
class->key, class->name);
return 0;
}
}
if (very_verbose(class))
printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
(unsigned long long)chain_key, class->key, class->name);
/*
* Allocate a new chain entry from the static array, and add
* it to the hash:
*/
if (!graph_lock())
return 0;
/*
* We have to walk the chain again locked - to avoid duplicates:
*/
list_for_each_entry(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
graph_unlock();
goto cache_hit;
}
}
if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
if (!debug_locks_off_graph_unlock())
return 0;
printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
printk("turning off the locking correctness validator.\n");
return 0;
}
chain = lock_chains + nr_lock_chains++;
chain->chain_key = chain_key;
chain->irq_context = hlock->irq_context;
/* Find the first held_lock of current chain */
hlock_next = hlock;
for (i = curr->lockdep_depth - 1; i >= 0; i--) {
hlock_curr = curr->held_locks + i;
if (hlock_curr->irq_context != hlock_next->irq_context)
break;
hlock_next = hlock_curr;
}
i++;
chain->depth = curr->lockdep_depth + 1 - i;
cn = nr_chain_hlocks;
while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
if (n == cn)
break;
cn = n;
}
if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
chain->base = cn;
for (j = 0; j < chain->depth - 1; j++, i++) {
int lock_id = curr->held_locks[i].class_idx - 1;
chain_hlocks[chain->base + j] = lock_id;
}
chain_hlocks[chain->base + j] = class - lock_classes;
}
list_add_tail_rcu(&chain->entry, hash_head);
debug_atomic_inc(&chain_lookup_misses);
inc_chains();
return 1;
}
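/*
 * The cn/cmpxchg loop above is a generic lock-free "claim a range of slots"
 * pattern. A minimal sketch of the same idea in isolation (hypothetical
 * helper, not part of lockdep):
 */
static int reserve_slots(int *next_free, int max, int want)
{
	int old = *next_free;
	int seen;

	while (old + want <= max) {
		seen = cmpxchg(next_free, old, old + want);
		if (seen == old)
			return old;	/* we own slots [old, old + want) */
		old = seen;		/* lost the race: retry from the new value */
	}
	return -1;			/* not enough slots left */
}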
static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
struct held_lock *hlock, int chain_head, u64 chain_key)
{
/*
* Trylock needs to maintain the stack of held locks, but it
* does not add new dependencies, because trylock can be done
* in any order.
*
* We look up the chain_key and do the O(N^2) check and update of
* the dependencies only if this is a new dependency chain.
* (If lookup_chain_cache() returns with 1 it acquires
* graph_lock for us)
*/
if (!hlock->trylock && (hlock->check == 2) &&
lookup_chain_cache(curr, hlock, chain_key)) {
/*
* Check whether last held lock:
*
* - is irq-safe, if this lock is irq-unsafe
* - is softirq-safe, if this lock is hardirq-unsafe
*
* And check whether the new lock's dependency graph
* could lead back to the previous lock.
*
* any of these scenarios could lead to a deadlock. If all
* validations are ok, we continue below.
*/
int ret = check_deadlock(curr, hlock, lock, hlock->read);
if (!ret)
return 0;
/*
* Mark recursive read, as we jump over it when
* building dependencies (just like we jump over
* trylock entries):
*/
if (ret == 2)
hlock->read = 2;
/*
* Add dependency only if this lock is not the head
* of the chain, and if it's not a secondary read-lock:
*/
if (!chain_head && ret != 2)
if (!check_prevs_add(curr, hlock))
return 0;
graph_unlock();
} else
/* after lookup_chain_cache(): */
if (unlikely(!debug_locks))
return 0;
return 1;
}
#else
static inline int validate_chain(struct task_struct *curr,
struct lockdep_map *lock, struct held_lock *hlock,
int chain_head, u64 chain_key)
{
return 1;
}
#endif
/*
* We are building curr_chain_key incrementally, so double-check
* it from scratch, to make sure that it's done correctly:
*/
static void check_chain_key(struct task_struct *curr)
{
#ifdef CONFIG_DEBUG_LOCKDEP
struct held_lock *hlock, *prev_hlock = NULL;
unsigned int i, id;
u64 chain_key = 0;
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
if (chain_key != hlock->prev_chain_key) {
debug_locks_off();
WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
curr->lockdep_depth, i,
(unsigned long long)chain_key,
(unsigned long long)hlock->prev_chain_key);
return;
}
id = hlock->class_idx - 1;
if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
return;
if (prev_hlock && (prev_hlock->irq_context !=
hlock->irq_context))
chain_key = 0;
chain_key = iterate_chain_key(chain_key, id);
prev_hlock = hlock;
}
if (chain_key != curr->curr_chain_key) {
debug_locks_off();
WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
curr->lockdep_depth, i,
(unsigned long long)chain_key,
(unsigned long long)curr->curr_chain_key);
}
#endif
}
static int
print_usage_bug(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=================================\n");
printk( "[ INFO: inconsistent lock state ]\n");
print_kernel_version();
printk( "---------------------------------\n");
printk("inconsistent {%s} -> {%s} usage.\n",
usage_str[prev_bit], usage_str[new_bit]);
printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
curr->comm, task_pid_nr(curr),
trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
trace_hardirqs_enabled(curr),
trace_softirqs_enabled(curr));
print_lock(this);
printk("{%s} state was registered at:\n", usage_str[prev_bit]);
print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
print_irqtrace_events(curr);
printk("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
return 0;
}
/*
* Print out an error if an invalid bit is set:
*/
static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
return print_usage_bug(curr, this, bad_bit, new_bit);
return 1;
}
static int mark_lock(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit);
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)