Rev 6936 → Rev 7143
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization is mainly aimed at
 * rq->lock: double_rq_lock() acquires it at single depth, and it is
 * highly contended.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
-	u8				irq_context;
-	u8				depth;
-	u16				base;
+	/* see BUILD_BUG_ON()s in lookup_chain_cache() */
+	unsigned int			irq_context :  2,
+					depth       :  6,
+					base        : 24;
+	/* 4 byte hole */
	struct hlist_node		entry;
	u64				chain_key;
};
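
/*
 * Editor's note: the new bitfield layout (Rev 7143) packs irq_context,
 * depth and base into one 32-bit word, so the chain-table index (base)
 * is limited to 2^24 entries and the chain depth to 2^6 held locks.
 * The BUILD_BUG_ON()s referenced above live in kernel/locking/lockdep.c;
 * a rough sketch of those compile-time guards (chain_hlocks, held_locks
 * and lock_classes are lockdep.c internals):
 */
#if 0	/* illustrative sketch, not part of this header */
BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
BUILD_BUG_ON((1UL << 6)  <= ARRAY_SIZE(curr->held_locks));
BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
#endif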

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;					/* 16 bits */

	unsigned int read:2;		/* see lock_acquire() comment */
	unsigned int check:1;		/* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;				/* 32 bits */
	unsigned int pin_count;
};
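
/*
 * Editor's sketch: the "hash the hashes" step described above is
 * implemented in kernel/locking/lockdep.c. In kernels of roughly this
 * vintage it is a shift-xor mix of the previous chain key with the next
 * class index, approximately:
 */
#if 0	/* illustrative only */
#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	 ((key1) >> (64 - MAX_LOCKDEP_KEYS_BITS)) ^ \
	 (key2))
#endif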

/*
 * Initialization, self-test and debugging-output methods:
 */
-extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
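
/*
 * Editor's sketch of static initialization: a subsystem that wants a
 * stand-alone lockdep_map (not embedded in a real lock) can pair a
 * static class key with this initializer. The names below are
 * hypothetical:
 */
#if 0	/* illustrative only */
static struct lock_class_key example_dep_key;
static struct lockdep_map example_dep_map =
	STATIC_LOCKDEP_MAP_INIT("example_dep_map", &example_dep_key);
#endif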

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
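
/*
 * Editor's sketch of a class-split: two locks of the same type that are
 * legitimately taken in a fixed order can be given distinct classes so
 * lockdep does not report a false self-deadlock. Names are hypothetical:
 */
#if 0	/* illustrative only */
static struct lock_class_key parent_key, child_key;

static void example_init(struct mutex *parent, struct mutex *child)
{
	mutex_init(parent);
	mutex_init(child);
	lockdep_set_class(parent, &parent_key);
	lockdep_set_class(child, &child_key);
	/* parent -> child nesting now records two classes, not a cycle */
}
#endif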
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
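
/*
 * Editor's sketch: a locking primitive wraps its real acquire/release
 * with these hooks. Roughly how an annotated spinlock wrapper wires
 * them up (simplified from include/linux/spinlock_api_smp.h):
 */
#if 0	/* illustrative only */
static inline void __example_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);	/* trylock=0, read=0 */
	do_raw_spin_lock(lock);
}

static inline void __example_spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);	/* nested=1 */
	do_raw_spin_unlock(lock);
	preempt_enable();
}
#endif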

#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

extern void lock_pin_lock(struct lockdep_map *lock);
extern void lock_unpin_lock(struct lockdep_map *lock);

# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)
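
/*
 * Editor's sketch: lockdep_assert_held() documents and enforces a
 * locking contract at run time. A hypothetical helper that must be
 * called with a lock already held:
 */
#if 0	/* illustrative only */
static void example_update_counters(struct example_dev *dev)
{
	lockdep_assert_held(&dev->lock);	/* caller must hold dev->lock */
	dev->counter++;
}
#endif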

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
#define lockdep_unpin_lock(l)	lock_unpin_lock(&(l)->dep_map)

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
-# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case, since the result is not well defined and callers should
 * rather guard the call with #ifdef themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)	do { (void)(l); } while (0)

#define lockdep_recursing(tsk)		(0)

#define lockdep_pin_lock(l)		do { (void)(l); } while (0)
#define lockdep_unpin_lock(l)		do { (void)(l); } while (0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)
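
/*
 * Editor's sketch: LOCK_CONTENDED() lets lock-stat distinguish the
 * uncontended fast path (trylock succeeds) from a contended wait.
 * Roughly how a mutex-style primitive would use it; the helper names
 * below are hypothetical:
 */
#if 0	/* illustrative only */
void example_mutex_lock(struct mutex *lock)
{
	might_sleep();
	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, example_mutex_trylock, example_mutex_lock_slowpath);
}
#endif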

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip)	do {} while (0)
#define lock_acquired(lockdep_map, ip)	do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING	1

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
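
/*
 * Editor's sketch: lock_map_acquire()/lock_map_release() annotate
 * pseudo-locks that have no lock word at all, e.g. "flush waits for
 * work to finish" dependencies. A hypothetical flush path:
 */
#if 0	/* illustrative only */
static struct lock_class_key example_flush_key;
static struct lockdep_map example_flush_map =
	STATIC_LOCKDEP_MAP_INIT("example_flush", &example_flush_key);

static void example_flush(void)
{
	lock_map_acquire(&example_flush_map);	/* we depend on the work */
	lock_map_release(&example_flush_map);
	/* ... actually wait for completion here ... */
}
#endif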

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
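
/*
 * Editor's sketch: might_lock() records an acquire+release pair so a
 * path that only conditionally takes a lock still feeds its dependency
 * into the validator on every call. Hypothetical example:
 */
#if 0	/* illustrative only */
static int example_alloc(struct example_pool *pool, bool can_block)
{
	might_lock(&pool->lock);	/* we may take it on the slow path */
	if (!can_block)
		return example_try_fast_alloc(pool);

	mutex_lock(&pool->lock);
	/* ... slow-path allocation ... */
	mutex_unlock(&pool->lock);
	return 0;
}
#endif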

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */