/*
* kernel/lockdep.c
*
* Runtime locking correctness validator
*
* Started by Ingo Molnar:
*
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
*
* this code maps all the lock dependencies as they occur in a live kernel
* and will warn about the following classes of locking bugs:
*
* - lock inversion scenarios
* - circular lock dependencies
* - hardirq/softirq safe/unsafe locking bugs
*
* Bugs are reported even if the current locking scenario does not cause
* any deadlock at this point.
*
* I.e. if anytime in the past two locks were taken in a different order,
* even if it happened for another task, even if those were different
* locks (but of the same class as this lock), this code will detect it.
*
* Thanks to Arjan van de Ven for coming up with the initial idea of
* mapping lock dependencies runtime.
*/
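/*
 * Illustrative sketch (not part of this file): the classic AB-BA
 * inversion that the validator reports even before any real deadlock
 * happens.  The lock names below are hypothetical:
 *
 *	// task or CPU 0, at some earlier point in time
 *	spin_lock(&lock_a);
 *	spin_lock(&lock_b);
 *	...
 *	spin_unlock(&lock_b);
 *	spin_unlock(&lock_a);
 *
 *	// another task, later, possibly on another CPU
 *	spin_lock(&lock_b);
 *	spin_lock(&lock_a);	// lockdep warns: circular dependency a -> b -> a
 *
 * The a -> b dependency is recorded the first time it is seen, so the
 * reverse ordering is flagged immediately, whether or not the two
 * tasks ever contend for the locks at the same moment.
 */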
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <asm/sections.h>
#include "lockdep_internals.h"
#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif
#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif
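/*
 * Usage note (illustrative, assuming the usual behaviour of a built-in
 * module_param with 0644 permissions): these knobs are typically
 * writable at runtime, e.g.
 *
 *	# echo 0 > /sys/module/lockdep/parameters/prove_locking
 *
 * and settable on the kernel command line (lockdep.prove_locking=0),
 * without rebuilding with CONFIG_PROVE_LOCKING disabled.
 */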
/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 * class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
static int graph_lock(void)
{
	__raw_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		__raw_spin_unlock(&lockdep_lock);
		return 0;
	}
	return 1;
}
static inline int graph_unlock(void)
{
	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
		return DEBUG_LOCKS_WARN_ON(1);

	__raw_spin_unlock(&lockdep_lock);
	return 0;
}
/*
* Turn lock debugging off and return with 0 if it was off already,
* and also release the graph lock:
*/
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	__raw_spin_unlock(&lockdep_lock);

	return ret;
}
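/*
 * Illustrative sketch (not part of this file): how the rest of lockdep
 * is expected to use the helpers above when mutating the dependency
 * graph.  Error paths use debug_locks_off_graph_unlock() so debugging
 * is frozen before a report is printed; print_report() below is a
 * hypothetical stand-in for the real reporting functions:
 *
 *	if (!graph_lock())
 *		return 0;		// another CPU hit a bug, back off
 *	...modify classes / list entries / hashes...
 *	graph_unlock();
 *
 *	// on detecting a problem while holding the graph lock:
 *	if (!debug_locks_off_graph_unlock())
 *		return 0;		// debugging was already turned off
 *	print_report(...);
 */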
static int lockdep_initialized;
unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
/*
* All data structures here are protected by the global debug_lock.
*
* Mutex key structs only get allocated, once during bootup, and never
* get freed - this significantly simplifies the debugging code.
*/
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
static int lock_contention_point(struct lock_class *class, unsigned long ip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
		if (class->contention_point[i] == 0) {
			class->contention_point[i] = ip;
			break;
		}
		if (class->contention_point[i] == ip)
			break;
	}

	return i;
}
static void lock_time_inc(struct lock_time *lt, s64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->min)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}
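/*
 * Worked example (illustrative): starting from an all-zero lock_time,
 * calling lock_time_inc() with samples 5, 3, 9 (in that order) yields
 * min == 3, max == 9, total == 17, nr == 3.  The "|| !lt->min" test is
 * what lets the very first sample replace the initial zero minimum.
 */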
static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	dst->min += src->min;
	dst->max += src->max;
	dst->total += src->total;
	dst->nr += src->nr;
}
struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}
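/*
 * Illustrative sketch (not part of this file): a reader-side consumer
 * of the per-class aggregate returned above, along the lines of the
 * /proc/lock_stat reporting code.  Everything except lock_stats() and
 * the struct fields used above is hypothetical:
 *
 *	struct lock_class_stats stats = lock_stats(class);
 *
 *	seq_printf(m, "%s: wait-total %lld, hold-total %lld\n",
 *		   class_name(class),		// hypothetical helper
 *		   stats.write_waittime.total,
 *		   stats.write_holdtime.total);
 */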
void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
}