/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if at any time in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitops.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
	__raw_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * dropped already)
	 */
	if (!debug_locks) {
		__raw_spin_unlock(&lockdep_lock);
		return 0;
	}
	/* prevent any recursions within lockdep from causing deadlocks */
	current->lockdep_recursion++;
	return 1;
}

static inline int graph_unlock(void)
{
	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
		return DEBUG_LOCKS_WARN_ON(1);

	current->lockdep_recursion--;
	__raw_spin_unlock(&lockdep_lock);
	return 0;
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
	int ret = debug_locks_off();

	__raw_spin_unlock(&lockdep_lock);

	return ret;
}

static int lockdep_initialized;

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * All data structures here are protected by the global debug_lock.
 *
 * Mutex key structs only get allocated, once during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

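/*
 * Map a held_lock back to its lock_class. The class index is stored
 * 1-based in hlock->class_idx so that 0 can be caught as "no class".
 */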
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
	if (!hlock->class_idx) {
		DEBUG_LOCKS_WARN_ON(1);
		return NULL;
	}
	return lock_classes + hlock->class_idx - 1;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);

static inline u64 lockstat_clock(void)
{
	return cpu_clock(smp_processor_id());
}

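/*
 * Record the call site @ip in a class's contention/contending points[]
 * array and return the slot index used: an existing slot if @ip has been
 * seen before, otherwise the first free one.
 */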
static int lock_point(unsigned long points[], unsigned long ip)
{
	int i;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		if (points[i] == 0) {
			points[i] = ip;
			break;
		}
		if (points[i] == ip)
			break;
	}

	return i;
}

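/*
 * Fold one wait/hold time sample into a lock_time bucket, updating
 * the min, max, total and number-of-samples fields.
 */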
static void lock_time_inc(struct lock_time *lt, u64 time)
{
	if (time > lt->max)
		lt->max = time;

	if (time < lt->min || !lt->nr)
		lt->min = time;

	lt->total += time;
	lt->nr++;
}

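/* Merge the per-cpu lock_time statistics in @src into @dst. */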
static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
	if (!src->nr)
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}

struct lock_class_stats lock_stats(struct lock_class *class)
{
	struct lock_class_stats stats;
	int cpu, i;

	memset(&stats, 0, sizeof(struct lock_class_stats));
	for_each_possible_cpu(cpu) {
		struct lock_class_stats *pcs =
			&per_cpu(lock_stats, cpu)[class - lock_classes];

		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
			stats.contention_point[i] += pcs->contention_point[i];

		for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
			stats.contending_point[i] += pcs->contending_point[i];

		lock_time_add(&pcs->read_waittime, &stats.read_waittime);
		lock_time_add(&pcs->write_waittime, &stats.write_waittime);

		lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
		lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

		for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
			stats.bounces[i] += pcs->bounces[i];
	}

	return stats;
}

void clear_lock_stats(struct lock_class *class)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct lock_class_stats *cpu_stats =
			&per_cpu(lock_stats, cpu)[class - lock_classes];

		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
	}
	memset(class->contention_point, 0, sizeof(class->contention_point));
	memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
	return &get_cpu_var(lock_stats)[class - lock_classes];
}

static void put_lock_stats(struct lock_class_stats *stats)
{
	put_cpu_var(lock_stats);
}

static void lock_release_holdtime(struct held_lock *hlock)
{
	struct lock_class_stats *stats;
	u64 holdtime;

	if (!lock_stat)
		return;

	holdtime = lockstat_clock() - hlock->holdtime_stamp;

	stats = get_lock_stats(hlock_class(hlock));
	if (hlock->read)
		lock_time_inc(&stats->read_holdtime, holdtime);
	else
		lock_time_inc(&stats->write_holdtime, holdtime);
	put_lock_stats(stats);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock lock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
#define __classhashfn(key)	hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)	(classhash_table + __classhashfn((key)))

static struct list_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)	hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))

static struct list_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
	(key2))

void lockdep_off(void)
{
	current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
	current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);

/*
 * Debugging switches:
 */

#define VERBOSE			0
#define VERY_VERBOSE		0

#if VERBOSE
# define HARDIRQ_VERBOSE	1
# define SOFTIRQ_VERBOSE	1
# define RECLAIM_VERBOSE	1
#else
# define HARDIRQ_VERBOSE	0
# define SOFTIRQ_VERBOSE	0
# define RECLAIM_VERBOSE	0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example */
	if (class->name_version == 1 &&
			!strcmp(class->name, "lockname"))
		return 1;
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. Return 1 to allow everything else. */
	return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

static int save_trace(struct stack_trace *trace)
{
	trace->nr_entries = 0;
	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
	trace->entries = stack_trace + nr_stack_trace_entries;

	trace->skip = 3;

	save_stack_trace(trace);

	/*
	 * Some daft arches put -1 at the end to indicate it's a full trace.
	 *
	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
	 * complete trace that maxes out the entries provided will be reported
	 * as incomplete, friggin useless </rant>
	 */
	if (trace->nr_entries != 0 &&
	    trace->entries[trace->nr_entries-1] == ULONG_MAX)
		trace->nr_entries--;

	trace->max_entries = trace->nr_entries;

	nr_stack_trace_entries += trace->nr_entries;

	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
		if (!debug_locks_off_graph_unlock())
			return 0;

		printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return 0;
	}

	return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * We cannot printk in early bootup code. Not even early_printk()
 * might work. So we mark any initialization errors and printk
 * about it later on, in lockdep_info().
 */
static int lockdep_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
	.max_entries = ARRAY_SIZE(lockdep_init_trace_data),
	.entries = lockdep_init_trace_data,
};

/*
 * Various lockdep statistics:
 */
atomic_t chain_lookup_hits;
atomic_t chain_lookup_misses;
atomic_t hardirqs_on_events;
atomic_t hardirqs_off_events;
atomic_t redundant_hardirqs_on;
atomic_t redundant_hardirqs_off;
atomic_t softirqs_on_events;
atomic_t softirqs_off_events;
atomic_t redundant_softirqs_on;
atomic_t redundant_softirqs_off;
atomic_t nr_unused_locks;
atomic_t nr_cyclic_checks;
atomic_t nr_find_usage_forwards_checks;
atomic_t nr_find_usage_backwards_checks;
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)						\
	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",		\
	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
	[LOCK_USED] = "INITIAL USE",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
	return 1UL << bit;
}

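/*
 * Map one irq-usage bit pair of a class onto a single character:
 *
 *   '.'  the lock was never acquired in this irq context and never
 *        held with this irq state enabled
 *   '+'  this irq state was enabled while the lock was held
 *   '-'  the lock was acquired in this irq context
 *   '?'  both of the above
 */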
static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
	char c = '.';

	if (class->usage_mask & lock_flag(bit + 2))
		c = '+';
	if (class->usage_mask & lock_flag(bit)) {
		c = '-';
		if (class->usage_mask & lock_flag(bit + 2))
			c = '?';
	}

	return c;
}

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
	int i = 0;

#define LOCKDEP_STATE(__STATE) 						\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	usage[i] = '\0';
}

static void print_lock_name(struct lock_class *class)
{
	char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
	const char *name;

	get_usage_chars(class, usage);

	name = class->name;
	if (!name) {
		name = __get_key_name(class->key, str);
		printk(" (%s", name);
	} else {
		printk(" (%s", name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		if (class->subclass)
			printk("/%d", class->subclass);
	}
	printk("){%s}", usage);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
	const char *name;
	char str[KSYM_NAME_LEN];

	name = lock->name;
	if (!name)
		name = __get_key_name(lock->key->subkeys, str);

	printk("%s", name);
}

static void print_lock(struct held_lock *hlock)
{
	print_lock_name(hlock_class(hlock));
	printk(", at: ");
	print_ip_sym(hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *curr)
{
	int i, depth = curr->lockdep_depth;

	if (!depth) {
		printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
		return;
	}
	printk("%d lock%s held by %s/%d:\n",
		depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));

	for (i = 0; i < depth; i++) {
		printk(" #%d: ", i);
		print_lock(curr->held_locks + i);
	}
}

static void print_kernel_version(void)
{
	printk("%s %.*s\n", init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

/*
 * Is this the address of a static object:
 */
static int static_obj(void *obj)
{
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;
#ifdef CONFIG_SMP
	int i;
#endif

	/*
	 * static variable?
	 */
	if ((addr >= start) && (addr < end))
		return 1;

	if (arch_is_kernel_data(addr))
		return 1;

#ifdef CONFIG_SMP
	/*
	 * percpu var?
	 */
	for_each_possible_cpu(i) {
		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
		end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
					+ per_cpu_offset(i);

		if ((addr >= start) && (addr < end))
			return 1;
	}
#endif

	/*
	 * module var?
	 */
	return is_module_address(addr);
}

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
	struct lock_class *class;
	int count = 0;

	if (!new_class->name)
		return 0;

	list_for_each_entry(class, &all_lock_classes, lock_entry) {
		if (new_class->key - new_class->subclass == class->key)
			return class->name_version;
		if (class->name && !strcmp(class->name, new_class->name))
			count = max(count, class->name_version);
	}

	return count + 1;
}

/*
 * Look up a lock's class in the hash-table. Returns the class if it has
 * already been registered, NULL otherwise. Registering new classes and
 * caching the result in the lock object is done by register_lock_class().
 */
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;

#ifdef CONFIG_DEBUG_LOCKDEP
	/*
	 * If the architecture calls into lockdep before initializing
	 * the hashes then we'll warn about it later. (we cannot printk
	 * right now)
	 */
	if (unlikely(!lockdep_initialized)) {
		lockdep_init();
		lockdep_init_error = 1;
		save_stack_trace(&lockdep_init_trace);
	}
#endif

	/*
	 * Static locks do not have their class-keys yet - for them the key
	 * is the lock object itself:
	 */
	if (unlikely(!lock->key))
		lock->key = (void *)lock;

	/*
	 * NOTE: the class-key must be unique. For dynamic locks, a static
	 * lock_class_key variable is passed in through the mutex_init()
	 * (or spin_lock_init()) call - which acts as the key. For static
	 * locks we use the lock object itself as the key.
	 */
	BUILD_BUG_ON(sizeof(struct lock_class_key) >
			sizeof(struct lockdep_map));

	key = lock->key->subkeys + subclass;

	hash_head = classhashentry(key);

	/*
	 * We can walk the hash lockfree, because the hash only
	 * grows, and we are careful when adding entries to the end:
	 */
	list_for_each_entry(class, hash_head, hash_entry) {
		if (class->key == key) {
			WARN_ON_ONCE(class->name != lock->name);
			return class;
		}
	}

	return NULL;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
	struct lockdep_subclass_key *key;
	struct list_head *hash_head;
	struct lock_class *class;
	unsigned long flags;

	class = look_up_lock_class(lock, subclass);
	if (likely(class))
		return class;

	/*
	 * Debug-check: all keys must be persistent!
 	 */
	if (!static_obj(lock->key)) {
		debug_locks_off();
		printk("INFO: trying to register non-static key.\n");
		printk("the code is fine but needs lockdep annotation.\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();

		return NULL;
	}

	key = lock->key->subkeys + subclass;
	hash_head = classhashentry(key);

	raw_local_irq_save(flags);
	if (!graph_lock()) {
		raw_local_irq_restore(flags);
		return NULL;
	}
	/*
	 * We have to do the hash-walk again, to avoid races
	 * with another CPU:
	 */
	list_for_each_entry(class, hash_head, hash_entry)
		if (class->key == key)
			goto out_unlock_set;
	/*
	 * Allocate a new key from the static array, and add it to
	 * the hash:
	 */
	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
		if (!debug_locks_off_graph_unlock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
		raw_local_irq_restore(flags);

		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}
	class = lock_classes + nr_lock_classes++;
	debug_atomic_inc(&nr_unused_locks);
	class->key = key;
	class->name = lock->name;
	class->subclass = subclass;
	INIT_LIST_HEAD(&class->lock_entry);
	INIT_LIST_HEAD(&class->locks_before);
	INIT_LIST_HEAD(&class->locks_after);
	class->name_version = count_matching_names(class);
	/*
	 * We use RCU's safe list-add method to make
	 * parallel walking of the hash-list safe:
	 */
	list_add_tail_rcu(&class->hash_entry, hash_head);
	/*
	 * Add it to the global list of classes:
	 */
	list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

	if (verbose(class)) {
		graph_unlock();
		raw_local_irq_restore(flags);

		printk("\nnew class %p: %s", class->key, class->name);
		if (class->name_version > 1)
			printk("#%d", class->name_version);
		printk("\n");
		dump_stack();

		raw_local_irq_save(flags);
		if (!graph_lock()) {
			raw_local_irq_restore(flags);
			return NULL;
		}
	}
out_unlock_set:
	graph_unlock();
	raw_local_irq_restore(flags);

	if (!subclass || force)
		lock->class_cache = class;

	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
		return NULL;

	return class;
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		if (!debug_locks_off_graph_unlock())
			return NULL;

		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();
		return NULL;
	}
	return list_entries + nr_list_entries++;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
			    struct list_head *head, unsigned long ip, int distance)
{
	struct lock_list *entry;
	/*
	 * Lock not present yet - get a new dependency struct and
	 * add it to the list:
	 */
	entry = alloc_list_entry();
	if (!entry)
		return 0;

	if (!save_trace(&entry->trace))
		return 0;

	entry->class = this;
	entry->distance = distance;
	/*
	 * Since we never remove from the dependency list, the list can
	 * be walked lockless by other CPUs, it's only allocation
	 * that must be protected by the spinlock. But this also means
	 * we must make new entries visible only once writes to the
	 * entry become visible - hence the RCU op:
	 */
	list_add_tail_rcu(&entry->entry, head);

	return 1;
}

/*
 * For good efficiency of the modulo operation, we use a power of 2
 */
#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build
 * the shortest path from the next lock to be acquired to the
 * previous held lock if there is a circular dependency between them.
 */
struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int  front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
	lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
	return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (__cq_full(cq))
		return -1;

	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;
	return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (__cq_empty(cq))
		return -1;

	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return 0;
}

static inline unsigned int  __cq_get_elem_count(struct circular_queue *cq)
{
	return (cq->rear - cq->front) & CQ_MASK;
}

static inline void mark_lock_accessed(struct lock_list *lock,
					struct lock_list *parent)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	lock->parent = parent;
	lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
	unsigned long nr;

	nr = lock - list_entries;
	WARN_ON(nr >= nr_list_entries);
	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
	return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
	int depth = 0;
	struct lock_list *parent;

	while ((parent = get_lock_parent(child))) {
		child = parent;
		depth++;
	}
	return depth;
}

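/*
 * Generic breadth-first search over the dependency graph, starting at
 * @source_entry and walking either the locks_after (forward) or
 * locks_before (backward) lists. @match is called on every entry; on a
 * match *@target_entry is set and 0 is returned. 1 means the whole
 * sub-graph was searched without a match, negative values indicate an
 * internal error (corrupted class pointer or queue overflow).
 */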
static int __bfs(struct lock_list *source_entry,
		 void *data,
		 int (*match)(struct lock_list *entry, void *data),
		 struct lock_list **target_entry,
		 int forward)
{
	struct lock_list *entry;
	struct list_head *head;
	struct circular_queue *cq = &lock_cq;
	int ret = 1;

	if (match(source_entry, data)) {
		*target_entry = source_entry;
		ret = 0;
		goto exit;
	}

	if (forward)
		head = &source_entry->class->locks_after;
	else
		head = &source_entry->class->locks_before;

	if (list_empty(head))
		goto exit;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)source_entry);

	while (!__cq_empty(cq)) {
		struct lock_list *lock;

		__cq_dequeue(cq, (unsigned long *)&lock);

		if (!lock->class) {
			ret = -2;
			goto exit;
		}

		if (forward)
			head = &lock->class->locks_after;
		else
			head = &lock->class->locks_before;

		list_for_each_entry(entry, head, entry) {
			if (!lock_accessed(entry)) {
				unsigned int cq_depth;
				mark_lock_accessed(entry, lock);
				if (match(entry, data)) {
					*target_entry = entry;
					ret = 0;
					goto exit;
				}

				if (__cq_enqueue(cq, (unsigned long)entry)) {
					ret = -1;
					goto exit;
				}
				cq_depth = __cq_get_elem_count(cq);
				if (max_bfs_queue_depth < cq_depth)
					max_bfs_queue_depth = cq_depth;
			}
		}
	}
exit:
	return ret;
}

static inline int __bfs_forwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 1);

}

static inline int __bfs_backwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 0);

}

/*
 * Recursive, forwards-direction lock-dependency checking, used for
 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 */

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, int depth)
{
	if (debug_locks_silent)
		return 0;
	printk("\n-> #%u", depth);
	print_lock_name(target->class);
	printk(":\n");
	print_stack_trace(&target->trace, 6);

	return 0;
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
			struct held_lock *check_src,
			struct held_lock *check_tgt)
{
	struct task_struct *curr = current;

	if (debug_locks_silent)
		return 0;

	printk("\n=======================================================\n");
	printk(  "[ INFO: possible circular locking dependency detected ]\n");
	print_kernel_version();
	printk(  "-------------------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(check_src);
	printk("\nbut task is already holding lock:\n");
	print_lock(check_tgt);
	printk("\nwhich lock already depends on the new lock.\n\n");
	printk("\nthe existing dependency chain (in reverse order) is:\n");

	print_circular_bug_entry(entry, depth);

	return 0;
}

static inline int class_equal(struct lock_list *entry, void *data)
{
	return entry->class == data;
}

static noinline int print_circular_bug(struct lock_list *this,
				struct lock_list *target,
				struct held_lock *check_src,
				struct held_lock *check_tgt)
{
	struct task_struct *curr = current;
	struct lock_list *parent;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	if (!save_trace(&this->trace))
		return 0;

	depth = get_lock_depth(target);

	print_circular_bug_header(target, depth, check_src, check_tgt);

	parent = get_lock_parent(target);

	while (parent) {
		print_circular_bug_entry(parent, --depth);
		parent = get_lock_parent(parent);
	}

	printk("\nother info that might help us debug this:\n\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

static noinline int print_bfs_bug(int ret)
{
	if (!debug_locks_off_graph_unlock())
		return 0;

	WARN(1, "lockdep bfs error:%d\n", ret);

	return 0;
}

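/*
 * BFS callback that only counts the entries it visits (via @data) and
 * never reports a match, so the walk covers the entire sub-graph.
 */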
static int noop_count(struct lock_list *entry, void *data)
{
	(*(unsigned long *)data)++;
	return 0;
}

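/*
 * Count all forward dependencies of @this by walking the complete
 * forward sub-graph with the counting callback above.
 */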
unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
	unsigned long  count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}
unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	__raw_spin_lock(&lockdep_lock);
	ret = __lockdep_count_forward_deps(&this);
	__raw_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
	unsigned long  count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	__raw_spin_lock(&lockdep_lock);
	ret = __lockdep_count_backward_deps(&this);
	__raw_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

/*
 * Check whether the dependency graph starting at <root> can lead to
 * <target>, i.e. whether the new dependency would create a cycle.
 */
static noinline int
check_noncircular(struct lock_list *root, struct lock_class *target,
		struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(&nr_cyclic_checks);

	result = __bfs_forwards(root, target, class_equal, target_entry);

	return result;
}

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 */

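/*
 * BFS match callback: true if the class's usage mask has the requested
 * usage bit set.
 */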
static inline int usage_match(struct lock_list *entry, void *bit)
{
	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
}



/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(&nr_find_usage_forwards_checks);

	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;