/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires it at both the main class and at single
 * depth, under heavy contention.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
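
/*
 * Illustrative note, not part of the original header: a key carries no
 * data, only its unique address matters. Declaring a static key is all
 * it takes to name a distinct class:
 *
 *	static struct lock_class_key my_class_key;	(hypothetical name)
 */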

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[4];
	unsigned long			contending_point[4];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}
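
/*
 * Illustrative use, paraphrased from callback paths such as timers (the
 * surrounding names are assumptions, not defined here): copy the map to
 * the stack before invoking the callback, so lock_map_release() still has
 * a valid map even if the callback frees the object embedding it:
 *
 *	struct lockdep_map lockdep_map;
 *
 *	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
 *	lock_map_acquire(&lockdep_map);
 *	fn(timer);				(may free *timer)
 *	lock_map_release(&lockdep_map);
 */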

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
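/* With 13 bits this works out to (1UL << 13) - 1 = 8191 usable classes. */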

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64 				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
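
/*
 * Illustrative sketch (all names are assumptions, not defined here):
 * a pseudo-lock used purely for dependency annotation can be declared
 * statically like this:
 *
 *	static struct lock_class_key hypothetical_key;
 *	static struct lockdep_map hypothetical_map =
 *		STATIC_LOCKDEP_MAP_INIT("hypothetical_map", &hypothetical_key);
 */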

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
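
/*
 * Illustrative class-split (all names are assumptions): if two kinds of
 * object share a lock type but never nest with each other, giving one
 * kind its own key splits the class and silences false positives:
 *
 *	static struct lock_class_key my_dev_key;
 *
 *	spin_lock_init(&dev->lock);
 *	lockdep_set_class(&dev->lock, &my_dev_key);
 */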

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
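
/*
 * Illustrative check (continuing the assumed names above): verify that a
 * lock still carries the key it was re-keyed with:
 *
 *	WARN_ON(!lockdep_match_class(&dev->lock, &my_dev_key));
 */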

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
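
/*
 * Illustrative annotation ("obj" and its dep_map field are assumptions,
 * not defined here): a custom synchronization construct reports an
 * exclusive, fully validated acquire and the matching release like so:
 *
 *	lock_acquire(&obj->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *	...critical section...
 *	lock_release(&obj->dep_map, 0, _RET_IP_);
 */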

#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
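
/*
 * Illustrative use (paraphrased from scheduler-style double-lock code;
 * the surrounding context is an assumption): after a nested double-lock
 * dance, record the still-held lock at subclass 0 again:
 *
 *	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 */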

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
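
/*
 * Illustrative example (hypothetical function): encode a locking
 * precondition in code rather than in a comment that can go stale:
 *
 *	static void update_stats(struct foo *f)
 *	{
 *		lockdep_assert_held(&f->lock);
 *		f->nr_updates++;
 *	}
 */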

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller
 * should rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()		do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)	do { (void)(l); } while (0)

#define lockdep_recursing(tsk)	(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */
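
/*
 * Illustrative use (mirrors how spinlock implementations typically wire
 * this up; the exact call site is an assumption): try the fast path
 * first, so only a failed trylock is counted as contention:
 *
 *	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 */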

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
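
/*
 * Illustrative expansion (follows directly from the maps above): a mutex
 * annotation resolves to a full-validation exclusive acquire:
 *
 *	mutex_acquire(&m->dep_map, subclass, 0, _RET_IP_)
 *	  -> lock_acquire_exclusive(&m->dep_map, subclass, 0, NULL, _RET_IP_)
 *	  -> lock_acquire(&m->dep_map, subclass, 0, 0, 1, NULL, _RET_IP_)
 */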

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
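
/*
 * Illustrative use (hypothetical helper): teach lockdep about a lock that
 * is only taken on a rare path, so the dependency is recorded on every
 * call rather than only when the rare path triggers:
 *
 *	static void *grab_buffer(struct pool *p)
 *	{
 *		might_lock(&p->lock);
 *		...
 *	}
 */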

#ifdef CONFIG_PROVE_RCU
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#endif

#endif /* __LINUX_LOCKDEP_H */