/*
* kernel/lockdep.c
*
* Runtime locking correctness validator
*
* Started by Ingo Molnar:
*
* Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
* this code maps all the lock dependencies as they occur in a live kernel
* and will warn about the following classes of locking bugs:
*
* - lock inversion scenarios
* - circular lock dependencies
* - hardirq/softirq safe/unsafe locking bugs
*
* Bugs are reported even if the current locking scenario does not cause
* any deadlock at this point.
*
* I.e. if anytime in the past two locks were taken in a different order,
* even if it happened for another task, even if those were different
* locks (but of the same class as this lock), this code will detect it.
*
* Thanks to Arjan van de Ven for coming up with the initial idea of
* mapping lock dependencies at runtime.
*/
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <asm/sections.h>
#include "lockdep_internals.h"
/*
* hash_lock: protects the lockdep hashes and class/list/hash allocators.
*
* This is one of the rare exceptions where it's justified
* to use a raw spinlock - we really don't want the spinlock
* code to recurse back into the lockdep code.
*/
static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
static int lockdep_initialized;
unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
/*
* Allocate a lockdep entry. (assumes hash_lock held, returns
* with NULL on failure)
*/
static struct lock_list *alloc_list_entry(void)
{
	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
		__raw_spin_unlock(&hash_lock);
		debug_locks_off();
		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
		printk("turning off the locking correctness validator.\n");
		return NULL;
	}
	return list_entries + nr_list_entries++;
}
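/*
 * Illustrative sketch only (not a helper defined in this excerpt): a
 * typical caller holds hash_lock, checks for NULL (in which case the
 * lock has already been dropped and the validator turned off), then
 * links the new entry into a dependency list. The ->class and ->entry
 * field names follow struct lock_list; "target_class" and "head" are
 * placeholders:
 *
 *	struct lock_list *entry = alloc_list_entry();
 *
 *	if (!entry)
 *		return 0;
 *	entry->class = target_class;
 *	list_add_tail_rcu(&entry->entry, head);
 */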
/*
* All data structures here are protected by the global debug_lock.
*
* Mutex key structs only get allocated once, during bootup, and never
* get freed - this significantly simplifies the debugging code.
*/
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
/*
* We keep a global list of all lock classes. The list only grows,
* never shrinks. The list is only accessed with the lockdep
* spinlock held.
*/
LIST_HEAD(all_lock_classes);
/*
* The lockdep classes are in a hash-table as well, for fast lookup:
*/
#define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE (1UL << CLASSHASH_BITS)
#define CLASSHASH_MASK (CLASSHASH_SIZE - 1)
#define __classhashfn(key) ((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
#define classhashentry(key) (classhash_table + __classhashfn((key)))
static struct list_head classhash_table[CLASSHASH_SIZE];
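/*
 * Lookup sketch (illustrative; the real lookup helpers come later in
 * this file): hashing the key selects a bucket, which is then walked
 * linearly. The ->hash_entry and ->key names follow struct lock_class:
 *
 *	struct list_head *hash_head = classhashentry(key);
 *	struct lock_class *class;
 *
 *	list_for_each_entry(class, hash_head, hash_entry)
 *		if (class->key == key)
 *			return class;
 */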
unsigned long nr_lock_chains;
static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
/*
* We put the lock dependency chains into a hash-table as well, to cache
* their existence:
*/
#define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE (1UL << CHAINHASH_BITS)
#define CHAINHASH_MASK (CHAINHASH_SIZE - 1)
#define __chainhashfn(chain) \
	(((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
static struct list_head chainhash_table[CHAINHASH_SIZE];
/*
* The hash key of the lock dependency chains is a hash itself too:
* it's a hash of all locks taken up to that lock, including that lock.
* It's a 64-bit hash, because it's important for the keys to be
* unique.
*/
#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
	(key2))
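/*
 * Sketch of how the chain key accumulates (illustrative; the actual
 * folding is done on the lock-acquire path): starting from 0, each
 * lock taken mixes a per-class value into the running 64-bit key, so
 * an identical stack of held locks always yields the same chain key.
 * Here "class_id" is a placeholder for that per-class value:
 *
 *	u64 chain_key = 0;
 *
 *	for each lock acquired, in order:
 *		chain_key = iterate_chain_key(chain_key, class_id);
 */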
void lockdep_off(void)
{
	current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);
void lockdep_on(void)
{
	current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);
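/*
 * Typical (illustrative) use of the two helpers above: code that takes
 * locks in a way the validator cannot model can opt out temporarily;
 * the per-task recursion counter makes the calls nest:
 *
 *	lockdep_off();
 *	... locking operations ignored by the validator ...
 *	lockdep_on();
 */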
/*
* Debugging switches:
*/
#define VERBOSE 0
#if VERBOSE
# define HARDIRQ_VERBOSE 1
# define SOFTIRQ_VERBOSE 1
#else
# define HARDIRQ_VERBOSE 0
# define SOFTIRQ_VERBOSE 0
#endif
#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
/*
* Quick filtering for interesting events:
*/
static int class_filter(struct lock_class *class)
{
#if 0
	/* Example: accept only one particular lock class: */
	if (class->name_version == 1 &&
			!strcmp(class->name, "&struct->lockfield"))
		return 1;
#endif
	/* Filter everything else. 1 would be to allow everything else */
	return 0;
}
#endif
static int verbose(struct lock_class *class)
{
#if VERBOSE
	return class_filter(class);
#endif
	return 0;
}
#ifdef CONFIG_TRACE_IRQFLAGS
static int hardirq_verbose(struct lock_class *class)
{
#if HARDIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}
static int softirq_verbose(struct lock_class *class)
{
#if SOFTIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}