enum lock_usage_bit bit_forwards, const char *irqclass)
{
int ret;
find_usage_bit = bit_backwards;
/* fills in <backwards_match> */
ret = find_usage_backwards(prev->class, 0);
if (!ret || ret == 1)
return ret;
find_usage_bit = bit_forwards;
ret = find_usage_forwards(next->class, 0);
if (!ret || ret == 1)
return ret;
/* ret == 2 */
return print_bad_irq_dependency(curr, prev, next,
bit_backwards, bit_forwards, irqclass);
}
static int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
{
/*
* Prove that the new dependency does not connect a hardirq-safe
* lock with a hardirq-unsafe lock - to achieve this we search
* the backwards-subgraph starting at <prev>, and the
* forwards-subgraph starting at <next>:
*/
if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
LOCK_ENABLED_HARDIRQS, "hard"))
return 0;
/*
* Prove that the new dependency does not connect a hardirq-safe-read
* lock with a hardirq-unsafe lock - to achieve this we search
* the backwards-subgraph starting at <prev>, and the
* forwards-subgraph starting at <next>:
*/
if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
LOCK_ENABLED_HARDIRQS, "hard-read"))
return 0;
/*
* Prove that the new dependency does not connect a softirq-safe
* lock with a softirq-unsafe lock - to achieve this we search
* the backwards-subgraph starting at <prev>, and the
* forwards-subgraph starting at <next>:
*/
if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
LOCK_ENABLED_SOFTIRQS, "soft"))
return 0;
/*
* Prove that the new dependency does not connect a softirq-safe-read
* lock with a softirq-unsafe lock - to achieve this we search
* the backwards-subgraph starting at <prev>, and the
* forwards-subgraph starting at <next>:
*/
if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
LOCK_ENABLED_SOFTIRQS, "soft"))
return 0;
return 1;
}
static void inc_chains(void)
{
if (current->hardirq_context)
nr_hardirq_chains++;
else {
if (current->softirq_context)
nr_softirq_chains++;
else
nr_process_chains++;
}
}
#else
static inline int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
{
return 1;
}
static inline void inc_chains(void)
{
nr_process_chains++;
}
#endif
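/*
 * Editor's note: an illustrative sketch, not part of lockdep.c. It shows,
 * with hypothetical locks and functions, the kind of dependency that
 * check_prev_add_irq()/check_usage() reject: a hardirq-safe lock held
 * while a hardirq-unsafe lock is acquired. Assumes the usual
 * <linux/spinlock.h> and <linux/interrupt.h> declarations.
 */
static DEFINE_SPINLOCK(sample_irq_safe_lock);	/* also taken from a hardirq handler */
static DEFINE_SPINLOCK(sample_irq_unsafe_lock);	/* taken with hardirqs enabled */

static irqreturn_t sample_irq_handler(int irq, void *dev)
{
	spin_lock(&sample_irq_safe_lock);	/* marks the class hardirq-safe */
	spin_unlock(&sample_irq_safe_lock);
	return IRQ_HANDLED;
}

static void sample_syscall_path(void)
{
	spin_lock(&sample_irq_unsafe_lock);	/* taken with irqs on: hardirq-unsafe */
	spin_unlock(&sample_irq_unsafe_lock);
}

static void sample_bad_dependency(void)
{
	spin_lock_irq(&sample_irq_safe_lock);
	spin_lock(&sample_irq_unsafe_lock);	/* hardirq-safe -> hardirq-unsafe:
						 * lockdep reports the bad irq
						 * dependency */
	spin_unlock(&sample_irq_unsafe_lock);
	spin_unlock_irq(&sample_irq_safe_lock);
}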
static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next)
{
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=============================================\n");
printk( "[ INFO: possible recursive locking detected ]\n");
printk( "---------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, curr->pid);
print_lock(next);
printk("\nbut task is already holding lock:\n");
print_lock(prev);
printk("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
return 0;
}
/*
* Check whether we are holding such a class already.
*
* (Note that this has to be done separately, because the graph cannot
* detect such classes of deadlocks.)
*
* Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
*/
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
struct lockdep_map *next_instance, int read)
{
struct held_lock *prev;
int i;
for (i = 0; i < curr->lockdep_depth; i++) {
prev = curr->held_locks + i;
if (prev->class != next->class)
continue;
		/*
		 * Allow read-after-read recursion of the same
		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
		 */
		if ((read == 2) && prev->read)
return 2;
return print_deadlock_bug(curr, prev, next);
}
return 1;
}
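/*
 * Editor's note: an illustrative sketch, not part of lockdep.c, with
 * hypothetical locks. It contrasts the read-after-read recursion that
 * check_deadlock() allows (return value 2) with the same-class
 * re-acquisition it reports via print_deadlock_bug().
 */
static DEFINE_RWLOCK(sample_rwlock);
static DEFINE_SPINLOCK(sample_spinlock);

static void sample_recursive_read_ok(void)
{
	read_lock(&sample_rwlock);
	read_lock(&sample_rwlock);	/* read == 2 && prev->read: allowed */
	read_unlock(&sample_rwlock);
	read_unlock(&sample_rwlock);
}

static void sample_self_deadlock(void)
{
	spin_lock(&sample_spinlock);
	spin_lock(&sample_spinlock);	/* same class already held:
					 * "possible recursive locking detected" */
	spin_unlock(&sample_spinlock);	/* never reached */
	spin_unlock(&sample_spinlock);
}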
/*
* There was a chain-cache miss, and we are about to add a new dependency
* to a previous lock. We recursively validate the following rules:
*
* - would the adding of the <prev> -> <next> dependency create a
* circular dependency in the graph? [== circular deadlock]
*
* - does the new prev->next dependency connect any hardirq-safe lock
* (in the full backwards-subgraph starting at <prev>) with any
* hardirq-unsafe lock (in the full forwards-subgraph starting at
* <next>)? [== illegal lock inversion with hardirq contexts]
*
* - does the new prev->next dependency connect any softirq-safe lock
* (in the full backwards-subgraph starting at <prev>) with any
* softirq-unsafe lock (in the full forwards-subgraph starting at
* <next>)? [== illegal lock inversion with softirq contexts]
*
* any of these scenarios could lead to a deadlock.
*
* Then if all the validations pass, we add the forwards and backwards
* dependency.
*/
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
struct held_lock *next, int distance)
{
struct lock_list *entry;
int ret;
/*
* Prove that the new <prev> -> <next> dependency would not
* create a circular dependency in the graph. (We do this by
* forward-recursing into the graph starting at <next>, and
* checking whether we can reach <prev>.)
*
* We are using global variables to control the recursion, to
* keep the stackframe size of the recursive functions low:
*/
check_source = next;
check_target = prev;
if (!(check_noncircular(next->class, 0)))
return print_circular_bug_tail();
if (!check_prev_add_irq(curr, prev, next))
return 0;
/*
* For recursive read-locks we do all the dependency checks,
* but we dont store read-triggered dependencies (only
* write-triggered dependencies). This ensures that only the
* write-side dependencies matter, and that if for example a
* write-lock never takes any other locks, then the reads are
* equivalent to a NOP.
*/
if (next->read == 2 || prev->read == 2)
return 1;
/*
* Is the <prev> -> <next> dependency already present?
*
* (this may occur even though this is a new chain: consider
* e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
* chains - the second one will be new, but L1 already has
* L2 added to its dependency list, due to the first chain.)
*/
	list_for_each_entry(entry, &prev->class->locks_after, entry) {
		if (entry->class == next->class) {
			if (distance == 1)
				entry->distance = 1;
			return 2;
		}
	}
	/*
	 * Ok, all validations passed, add the new lock
	 * to the previous lock's dependency list:
	 */
	ret = add_lock_to_list(prev->class, next->class,
			       &prev->class->locks_after, next->acquire_ip, distance);
	if (!ret)
		return 0;

	ret = add_lock_to_list(next->class, prev->class,
			       &next->class->locks_before, next->acquire_ip, distance);
	if (!ret)
		return 0;

	/*
	 * Debugging printouts:
	 */
	if (verbose(prev->class) || verbose(next->class)) {
		graph_unlock();
		printk("\n new dependency: ");
		print_lock_name(prev->class);
		printk(" => ");
		print_lock_name(next->class);
		printk("\n");
		dump_stack();
		return graph_lock();
	}
	return 1;
}
/*
* Add the dependency to all directly-previous locks that are 'relevant'.
* The ones that are relevant are (in increasing distance from curr):
* all consecutive trylock entries and the final non-trylock entry - or
* the end of this context's lock-chain - whichever comes first.
*/
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
int depth = curr->lockdep_depth;
struct held_lock *hlock;
	/*
	 * Debugging checks.
	 *
	 * Depth must not be zero for a non-head lock:
	 */
	if (!depth)
		goto out_bug;
	/*
	 * At least two relevant locks must exist for this
	 * to be a head:
	 */
	if (curr->held_locks[depth].irq_context !=
			curr->held_locks[depth-1].irq_context)
		goto out_bug;
for (;;) {
int distance = curr->lockdep_depth - depth + 1;
hlock = curr->held_locks + depth-1;
/*
* Only non-recursive-read entries get new dependencies
* added:
*/
if (hlock->read != 2) {
if (!check_prev_add(curr, hlock, next, distance))
return 0;
/*
* Stop after the first non-trylock entry,
* as non-trylock entries have added their
* own direct dependencies already, so this
* lock is connected to them indirectly:
*/
if (!hlock->trylock)
				break;
		}
		depth--;
/*
* End of lock-stack?
*/
if (!depth)
break;
/*
* Stop the search if we cross into another context:
*/
		if (curr->held_locks[depth].irq_context !=
				curr->held_locks[depth-1].irq_context)
			break;
	}
	return 1;

out_bug:
	if (!debug_locks_off_graph_unlock())
		return 0;

	WARN_ON(1);

	return 0;
}
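/*
 * Editor's note: an illustrative sketch, not part of lockdep.c, with
 * hypothetical locks. It shows which held locks check_prevs_add() links
 * to a newly acquired lock: all consecutive trylock entries plus the
 * first non-trylock entry.
 */
static DEFINE_SPINLOCK(sample_lock_w);
static DEFINE_SPINLOCK(sample_lock_x);
static DEFINE_SPINLOCK(sample_lock_y);
static DEFINE_SPINLOCK(sample_lock_z);

static void sample_trylock_chain(void)
{
	spin_lock(&sample_lock_w);			/* non-trylock */
	if (spin_trylock(&sample_lock_x)) {		/* trylock */
		if (spin_trylock(&sample_lock_y)) {	/* trylock */
			spin_lock(&sample_lock_z);
			/*
			 * check_prevs_add() records Y -> Z, X -> Z and
			 * W -> Z, then stops at W, the first
			 * non-trylock entry.
			 */
			spin_unlock(&sample_lock_z);
			spin_unlock(&sample_lock_y);
		}
		spin_unlock(&sample_lock_x);
	}
	spin_unlock(&sample_lock_w);
}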
unsigned long nr_lock_chains;
static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
/*
* Look up a dependency chain. If the key is not present yet then
* add it and return 1 - in this case the new dependency chain is
* validated. If the key is already hashed, return 0.
 * (On return with 1 graph_lock is held.)
 */
static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
{
struct list_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return 0;
/*
* We can walk it lock-free, because entries only get added
* to the hash:
*/
	list_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
cache_hit:
			debug_atomic_inc(&chain_lookup_hits);
			if (very_verbose(class))
				printk("\nhash chain already cached, key: "
					"%016Lx tail class: [%p] %s\n",
					(unsigned long long)chain_key,
					class->key, class->name);
			return 0;
		}
	}
	if (very_verbose(class))
		printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
			(unsigned long long)chain_key, class->key, class->name);
/*
* Allocate a new chain entry from the static array, and add
* it to the hash:
*/
if (!graph_lock())
return 0;
/*
* We have to walk the chain again locked - to avoid duplicates:
*/
list_for_each_entry(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
graph_unlock();
goto cache_hit;
}
}
if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
if (!debug_locks_off_graph_unlock())
return 0;
printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
printk("turning off the locking correctness validator.\n");
return 0;
}
chain = lock_chains + nr_lock_chains++;
chain->chain_key = chain_key;
list_add_tail_rcu(&chain->entry, hash_head);
debug_atomic_inc(&chain_lookup_misses);
inc_chains();
return 1;
}
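/*
 * Editor's note: an illustrative sketch, not part of lockdep.c. It reduces
 * the "lockless lookup, then re-check under the lock" idiom used by
 * lookup_chain_cache() to a small append-only array. All names are
 * hypothetical; the real code walks an RCU-style hash list instead.
 */
#define SAMPLE_MAX_KEYS	64

static u64 sample_keys[SAMPLE_MAX_KEYS];
static unsigned int sample_nr_keys;
static DEFINE_SPINLOCK(sample_keys_lock);

static int sample_key_present(u64 key)
{
	unsigned int i;

	/* entries only ever get appended, so a lockless walk is tolerable here */
	for (i = 0; i < sample_nr_keys; i++)
		if (sample_keys[i] == key)
			return 1;
	return 0;
}

static int sample_key_add_once(u64 key)
{
	if (sample_key_present(key))		/* fast path, no lock taken */
		return 0;

	spin_lock(&sample_keys_lock);
	if (sample_key_present(key)) {		/* re-check to avoid duplicates */
		spin_unlock(&sample_keys_lock);
		return 0;
	}
	if (sample_nr_keys < SAMPLE_MAX_KEYS)
		sample_keys[sample_nr_keys++] = key;
	spin_unlock(&sample_keys_lock);
	return 1;
}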
static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
struct held_lock *hlock, int chain_head)
{
/*
* Trylock needs to maintain the stack of held locks, but it
* does not add new dependencies, because trylock can be done
* in any order.
*
* We look up the chain_key and do the O(N^2) check and update of
* the dependencies only if this is a new dependency chain.
* (If lookup_chain_cache() returns with 1 it acquires
* graph_lock for us)
*/
if (!hlock->trylock && (hlock->check == 2) &&
lookup_chain_cache(curr->curr_chain_key, hlock->class)) {
/*
* Check whether last held lock:
*
* - is irq-safe, if this lock is irq-unsafe
* - is softirq-safe, if this lock is hardirq-unsafe
*
* And check whether the new lock's dependency graph
* could lead back to the previous lock.
*
		 * any of these scenarios could lead to a deadlock. If
		 * all validations pass, the dependencies are added below.
		 */
int ret = check_deadlock(curr, hlock, lock, hlock->read);
if (!ret)
return 0;
/*
* Mark recursive read, as we jump over it when
* building dependencies (just like we jump over
* trylock entries):
*/
if (ret == 2)
hlock->read = 2;
/*
* Add dependency only if this lock is not the head
* of the chain, and if it's not a secondary read-lock:
*/
if (!chain_head && ret != 2)
if (!check_prevs_add(curr, hlock))
return 0;
graph_unlock();
} else
		/* after lookup_chain_cache(): */
		if (unlikely(!debug_locks))
			return 0;

	return 1;
}

#else
static inline int validate_chain(struct task_struct *curr,
struct lockdep_map *lock, struct held_lock *hlock,
int chain_head)
{
return 1;
}
#endif
/*
* We are building curr_chain_key incrementally, so double-check
* it from scratch, to make sure that it's done correctly:
*/
static void check_chain_key(struct task_struct *curr)
{
#ifdef CONFIG_DEBUG_LOCKDEP
struct held_lock *hlock, *prev_hlock = NULL;
unsigned int i, id;
u64 chain_key = 0;
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
if (chain_key != hlock->prev_chain_key) {
debug_locks_off();
printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
curr->lockdep_depth, i,
(unsigned long long)chain_key,
(unsigned long long)hlock->prev_chain_key);
WARN_ON(1);
return;
}
id = hlock->class - lock_classes;
if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
return;
if (prev_hlock && (prev_hlock->irq_context !=
hlock->irq_context))
chain_key = 0;
chain_key = iterate_chain_key(chain_key, id);
prev_hlock = hlock;
}
if (chain_key != curr->curr_chain_key) {
debug_locks_off();
printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
curr->lockdep_depth, i,
(unsigned long long)chain_key,
(unsigned long long)curr->curr_chain_key);
WARN_ON(1);
}
#endif
}
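/*
 * Editor's note: an illustrative sketch, not part of lockdep.c. It shows
 * the "recompute from scratch and compare against the incremental value"
 * idea behind check_chain_key(). sample_mix() is a hypothetical stand-in
 * for iterate_chain_key(); only the fold-and-compare structure matters.
 */
static u64 sample_mix(u64 key, unsigned int id)
{
	return (key << 7) ^ (key >> 9) ^ id;	/* placeholder mixing only */
}

static int sample_chain_key_ok(const unsigned int *class_ids, unsigned int depth,
			       u64 cached_key)
{
	u64 key = 0;
	unsigned int i;

	for (i = 0; i < depth; i++)
		key = sample_mix(key, class_ids[i]);	/* rebuild incrementally */

	return key == cached_key;	/* must match the value built up so far */
}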
static int
print_usage_bug(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=================================\n");
printk( "[ INFO: inconsistent lock state ]\n");
print_kernel_version();
printk( "---------------------------------\n");
printk("inconsistent {%s} -> {%s} usage.\n",
usage_str[prev_bit], usage_str[new_bit]);
printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
curr->comm, curr->pid,
trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
trace_hardirqs_enabled(curr),
trace_softirqs_enabled(curr));
print_lock(this);
printk("{%s} state was registered at:\n", usage_str[prev_bit]);
print_stack_trace(this->class->usage_traces + prev_bit, 1);
print_irqtrace_events(curr);
printk("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
printk("\nstack backtrace:\n");
dump_stack();
return 0;
}
/*
* Print out an error if an invalid bit is set:
*/
static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
if (unlikely(this->class->usage_mask & (1 << bad_bit)))
return print_usage_bug(curr, this, bad_bit, new_bit);
return 1;
}
static int mark_lock(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit);
#ifdef CONFIG_TRACE_IRQFLAGS
/*
* print irq inversion bug:
*/
static int
print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
struct held_lock *this, int forwards,
const char *irqclass)
{
if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
printk("\n=========================================================\n");
printk( "[ INFO: possible irq lock inversion dependency detected ]\n");
printk( "---------------------------------------------------------\n");
printk("%s/%d just changed the state of lock:\n",
curr->comm, curr->pid);
print_lock(this);
if (forwards)
printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
else
printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
print_lock_name(other);
printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
printk("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
printk("\nthe first lock's dependencies:\n");
print_lock_dependencies(this->class, 0);
printk("\nthe second lock's dependencies:\n");
print_lock_dependencies(other, 0);
printk("\nstack backtrace:\n");
dump_stack();
return 0;
}
/*
* Prove that in the forwards-direction subgraph starting at <this>
* there is no lock matching <mask>:
*/
static int
check_usage_forwards(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit bit, const char *irqclass)
{
int ret;
find_usage_bit = bit;
/* fills in <forwards_match> */
ret = find_usage_forwards(this->class, 0);
if (!ret || ret == 1)
return ret;
return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
}
/*
* Prove that in the backwards-direction subgraph starting at <this>
* there is no lock matching <mask>:
*/
static int
check_usage_backwards(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit bit, const char *irqclass)
{
int ret;
find_usage_bit = bit;
/* fills in <backwards_match> */
ret = find_usage_backwards(this->class, 0);
if (!ret || ret == 1)
return ret;
return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
}
void print_irqtrace_events(struct task_struct *curr)
{
printk("irq event stamp: %u\n", curr->irq_events);
printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
print_ip_sym(curr->hardirq_enable_ip);
printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
print_ip_sym(curr->hardirq_disable_ip);
printk("softirqs last enabled at (%u): ", curr->softirq_enable_event);
print_ip_sym(curr->softirq_enable_ip);
printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
print_ip_sym(curr->softirq_disable_ip);
}
static int hardirq_verbose(struct lock_class *class)
{
#if HARDIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static int softirq_verbose(struct lock_class *class)
{
#if SOFTIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}
#define STRICT_READ_CHECKS	1
static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
			 enum lock_usage_bit new_bit)
{
	int ret = 1;

	switch(new_bit) {
case LOCK_USED_IN_HARDIRQ:
if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
return 0;
if (!valid_state(curr, this, new_bit,
LOCK_ENABLED_HARDIRQS_READ))
return 0;
/*
* just marked it hardirq-safe, check that this lock
* took no hardirq-unsafe lock in the past:
*/
if (!check_usage_forwards(curr, this,
LOCK_ENABLED_HARDIRQS, "hard"))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it hardirq-safe, check that this lock
* took no hardirq-unsafe-read lock in the past:
*/
if (!check_usage_forwards(curr, this,
LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
return 0;
#endif
if (hardirq_verbose(this->class))
ret = 2;
break;
case LOCK_USED_IN_SOFTIRQ:
if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
return 0;
if (!valid_state(curr, this, new_bit,
LOCK_ENABLED_SOFTIRQS_READ))
return 0;
/*
* just marked it softirq-safe, check that this lock
* took no softirq-unsafe lock in the past:
*/
if (!check_usage_forwards(curr, this,
LOCK_ENABLED_SOFTIRQS, "soft"))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it softirq-safe, check that this lock
* took no softirq-unsafe-read lock in the past:
*/
if (!check_usage_forwards(curr, this,
LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
return 0;
#endif
if (softirq_verbose(this->class))
ret = 2;
break;
case LOCK_USED_IN_HARDIRQ_READ:
if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
return 0;
/*
* just marked it hardirq-read-safe, check that this lock
* took no hardirq-unsafe lock in the past:
*/
if (!check_usage_forwards(curr, this,
LOCK_ENABLED_HARDIRQS, "hard"))
return 0;
if (hardirq_verbose(this->class))
ret = 2;
break;
case LOCK_USED_IN_SOFTIRQ_READ:
if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
return 0;
/*
* just marked it softirq-read-safe, check that this lock
* took no softirq-unsafe lock in the past:
*/
if (!check_usage_forwards(curr, this,
LOCK_ENABLED_SOFTIRQS, "soft"))
return 0;
if (softirq_verbose(this->class))
ret = 2;
break;
case LOCK_ENABLED_HARDIRQS:
if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
return 0;
if (!valid_state(curr, this, new_bit,
LOCK_USED_IN_HARDIRQ_READ))
return 0;
/*
* just marked it hardirq-unsafe, check that no hardirq-safe
* lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_HARDIRQ, "hard"))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it hardirq-unsafe, check that no
* hardirq-safe-read lock in the system ever took
* it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
return 0;
#endif
if (hardirq_verbose(this->class))
ret = 2;
break;
case LOCK_ENABLED_SOFTIRQS:
if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
return 0;
if (!valid_state(curr, this, new_bit,
LOCK_USED_IN_SOFTIRQ_READ))
return 0;
/*
* just marked it softirq-unsafe, check that no softirq-safe
* lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_SOFTIRQ, "soft"))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it softirq-unsafe, check that no
* softirq-safe-read lock in the system ever took
* it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
return 0;
#endif
if (softirq_verbose(this->class))
ret = 2;
break;
case LOCK_ENABLED_HARDIRQS_READ:
if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it hardirq-read-unsafe, check that no
* hardirq-safe lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_HARDIRQ, "hard"))
return 0;
#endif
if (hardirq_verbose(this->class))
ret = 2;
break;
case LOCK_ENABLED_SOFTIRQS_READ:
if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
return 0;
#if STRICT_READ_CHECKS
/*
* just marked it softirq-read-unsafe, check that no
* softirq-safe lock in the system ever took it in the past:
*/
if (!check_usage_backwards(curr, this,
LOCK_USED_IN_SOFTIRQ, "soft"))
return 0;
#endif
if (softirq_verbose(this->class))
ret = 2;
break;
default:
WARN_ON(1);
}
return ret;
}
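/*
 * Editor's note: an illustrative sketch, not part of lockdep.c, with a
 * hypothetical lock. It shows the usage pattern that valid_state() above
 * reports as an inconsistent lock state: the same class is both
 * hardirq-unsafe (taken with irqs enabled) and hardirq-safe (taken from
 * a hardirq handler).
 */
static DEFINE_SPINLOCK(sample_state_lock);

static void sample_process_side(void)
{
	spin_lock(&sample_state_lock);		/* irqs on: LOCK_ENABLED_HARDIRQS */
	spin_unlock(&sample_state_lock);
}

static irqreturn_t sample_state_handler(int irq, void *dev)
{
	spin_lock(&sample_state_lock);		/* LOCK_USED_IN_HARDIRQ: together with
						 * the bit above, valid_state() triggers
						 * print_usage_bug() */
	spin_unlock(&sample_state_lock);
	return IRQ_HANDLED;
}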
/*
* Mark all held locks with a usage bit:
*/
static int
mark_held_locks(struct task_struct *curr, int hardirq)
{
enum lock_usage_bit usage_bit;
struct held_lock *hlock;
int i;
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
if (hardirq) {
if (hlock->read)
usage_bit = LOCK_ENABLED_HARDIRQS_READ;
else
usage_bit = LOCK_ENABLED_HARDIRQS;
} else {
if (hlock->read)
usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
else
usage_bit = LOCK_ENABLED_SOFTIRQS;
}
if (!mark_lock(curr, hlock, usage_bit))
return 0;
}
return 1;
}
/*
* Debugging helper: via this flag we know that we are in
* 'early bootup code', and will warn about any invalid irqs-on event:
*/
static int early_boot_irqs_enabled;
void early_boot_irqs_off(void)
{
early_boot_irqs_enabled = 0;
}
void early_boot_irqs_on(void)
{
early_boot_irqs_enabled = 1;
}
/*
* Hardirqs will be enabled:
*/
void trace_hardirqs_on(void)
{
struct task_struct *curr = current;
unsigned long ip;
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
return;
if (unlikely(curr->hardirqs_enabled)) {
debug_atomic_inc(&redundant_hardirqs_on);
return;
}
/* we'll do an OFF -> ON transition: */
curr->hardirqs_enabled = 1;
ip = (unsigned long) __builtin_return_address(0);
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
return;
/*
* We are going to turn hardirqs on, so set the
* usage bit for all held locks:
*/
if (!mark_held_locks(curr, 1))
return;
/*
* If we have softirqs enabled, then set the usage
* bit for all held locks. (disabled hardirqs prevented
* this bit from being set before)
*/
	if (curr->softirqs_enabled)
		if (!mark_held_locks(curr, 0))
			return;
curr->hardirq_enable_ip = ip;
curr->hardirq_enable_event = ++curr->irq_events;
debug_atomic_inc(&hardirqs_on_events);
}
EXPORT_SYMBOL(trace_hardirqs_on);
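/*
 * Editor's note: an illustrative sketch, not part of lockdep.c, with a
 * hypothetical lock. It shows why trace_hardirqs_on() marks all currently
 * held locks: the lock below is acquired with hardirqs disabled, but irqs
 * are re-enabled while it is still held, so it must still be classified
 * as hardirq-unsafe.
 */
static DEFINE_SPINLOCK(sample_reenable_lock);

static void sample_reenable_path(void)
{
	local_irq_disable();
	spin_lock(&sample_reenable_lock);	/* acquired with hardirqs off */
	local_irq_enable();			/* trace_hardirqs_on() marks the held
						 * lock LOCK_ENABLED_HARDIRQS here */
	local_irq_disable();
	spin_unlock(&sample_reenable_lock);
	local_irq_enable();
}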
/*
* Hardirqs were disabled:
*/
void trace_hardirqs_off(void)
{
struct task_struct *curr = current;
if (unlikely(!debug_locks || current->lockdep_recursion))
return;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (curr->hardirqs_enabled) {
/*
* We have done an ON -> OFF transition:
*/
curr->hardirqs_enabled = 0;
curr->hardirq_disable_ip = _RET_IP_;
curr->hardirq_disable_event = ++curr->irq_events;
debug_atomic_inc(&hardirqs_off_events);
} else
debug_atomic_inc(&redundant_hardirqs_off);
}
EXPORT_SYMBOL(trace_hardirqs_off);
/*
* Softirqs will be enabled:
*/
void trace_softirqs_on(unsigned long ip)
{
struct task_struct *curr = current;
if (unlikely(!debug_locks))
return;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (curr->softirqs_enabled) {
debug_atomic_inc(&redundant_softirqs_on);
return;
}
/*
* We'll do an OFF -> ON transition:
*/
curr->softirqs_enabled = 1;
curr->softirq_enable_ip = ip;
curr->softirq_enable_event = ++curr->irq_events;
debug_atomic_inc(&softirqs_on_events);
/*
* We are going to turn softirqs on, so set the
* usage bit for all held locks, if hardirqs are
* enabled too:
*/
if (curr->hardirqs_enabled)
mark_held_locks(curr, 0);
}
/*
* Softirqs were disabled:
*/
void trace_softirqs_off(unsigned long ip)
{
struct task_struct *curr = current;
if (unlikely(!debug_locks))
return;
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return;
if (curr->softirqs_enabled) {
/*
* We have done an ON -> OFF transition:
*/