/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires it at single depth, so both cached entries
 * are highly contended.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};

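/*
 * Illustrative sketch, not part of the original header: a dynamically
 * initialized lock has no unique static address of its own, so a shared
 * static key gives every instance of a (hypothetical) type one class.
 */
#if 0
static struct lock_class_key foo_lock_key;	/* lives in .data */

static void foo_init(struct foo *f)
{
	spin_lock_init(&f->lock);
	lockdep_set_class(&f->lock, &foo_lock_key);
}
#endif
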
extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

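/*
 * Illustrative sketch, not part of the original header: a subsystem
 * that hand-rolls its own lock embeds a lockdep_map next to the real
 * lock state ('struct foo_lock' is hypothetical).
 */
#if 0
struct foo_lock {
	atomic_t		state;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	dep_map;
#endif
};
#endif
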
static inline void lockdep_copy_map(struct lockdep_map *to,
				    struct lockdep_map *from)
{
	int i;

	*to = *from;
	/*
	 * Since the class cache can be modified concurrently we could observe
	 * half pointers (64bit arch using 32bit copy insns). Therefore clear
	 * the caches and take the performance hit.
	 *
	 * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
	 * that relies on cache abuse.
	 */
	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
		to->class_cache[i] = NULL;
}

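/*
 * Illustrative sketch of the usage pattern, modelled on the workqueue
 * code ('struct item' is hypothetical): copy the map onto the stack
 * first, because the object owning the original map may be freed by
 * its callback while lockdep still needs the map for lock_map_release().
 */
#if 0
static void process_item(struct item *it)
{
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &it->lockdep_map);

	lock_map_acquire(&lockdep_map);
	it->fn(it);			/* may free 'it' */
	lock_map_release(&lockdep_map);
}
#endif
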
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64 				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }

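/*
 * Illustrative sketch, not part of the original header: a statically
 * initialized pseudo-lock used purely for annotation (all names are
 * hypothetical).
 */
#if 0
static struct lock_class_key demo_map_key;
static struct lockdep_map demo_map =
	STATIC_LOCKDEP_MAP_INIT("demo_map", &demo_map_key);
#endif
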
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
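
/*
 * Illustrative sketch of a class-split, not part of the original header:
 * every instance is initialized by common code, but the 'special' ones
 * nest inside normal ones, so they get their own key ('struct demo' and
 * all names are hypothetical).
 */
#if 0
static struct lock_class_key demo_special_key;

static void demo_init(struct demo *d, bool special)
{
	spin_lock_init(&d->lock);
	if (special)
		lockdep_set_class_and_name(&d->lock, &demo_special_key,
					   "demo->lock/special");
}
#endif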
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

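/*
 * Illustrative sketch, not part of the original header: verify that a
 * caller-supplied lock belongs to the class we expect (names reuse the
 * hypothetical demo_init() example above).
 */
#if 0
static void demo_check(struct demo *d)
{
	WARN_ON_ONCE(!lockdep_match_class(&d->lock, &demo_special_key));
}
#endif
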
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

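/*
 * Illustrative sketch, not part of the original header: wiring a
 * hand-rolled primitive (the hypothetical 'struct foo_lock' above)
 * into lockdep. Exclusive (read=0), fully validated (check=1) acquire.
 */
#if 0
static void foo_lock(struct foo_lock *l)
{
	lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
	/* ... actually take the lock ... */
}

static void foo_unlock(struct foo_lock *l)
{
	/* ... actually release the lock ... */
	lock_release(&l->dep_map, 0, _RET_IP_);
}
#endif
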
#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)

#define lockdep_assert_held_once(l)	do {				\
		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
	} while (0)

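/*
 * Illustrative sketch, not part of the original header: document and
 * enforce a locking rule at function entry ('struct demo' hypothetical).
 */
#if 0
static void demo_update(struct demo *d)
{
	lockdep_assert_held(&d->lock);	/* caller must hold d->lock */
	d->counter++;
}
#endif
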
#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and callers should
 * rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)		do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)	do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

#endif /* !CONFIG_LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

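/*
 * Illustrative sketch, not part of the original header: a lock
 * implementation plugs its fast and slow paths into the contention
 * statistics (hypothetical __foo_trylock()/__foo_lock_slow()).
 */
#if 0
void foo_lock(struct foo_lock *l)
{
	lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
	LOCK_CONTENDED(l, __foo_trylock, __foo_lock_slow);
}
#endif
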
#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1

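/*
 * Illustrative sketch, not part of the original header: taking two
 * locks of the same class in a stable address order, in the style of
 * double_rq_lock() ('struct demo' is hypothetical).
 */
#if 0
static void demo_double_lock(struct demo *a, struct demo *b)
{
	if (a > b)
		swap(a, b);
	spin_lock(&a->lock);
	spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
}
#endif
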
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)			lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)			lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)	lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)		lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)			lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)		lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)			lock_release(l, n, i)

#define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock) 						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif

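/*
 * Illustrative sketch, not part of the original header: a path that
 * only sometimes takes the lock can still declare the dependency on
 * every call ('struct demo' is hypothetical).
 */
#if 0
static void demo_put(struct demo *d)
{
	might_lock(&d->lock);		/* dependency recorded even when
					 * the fast path wins */
	if (atomic_dec_and_test(&d->refs)) {
		spin_lock(&d->lock);
		list_del(&d->node);
		spin_unlock(&d->lock);
	}
}
#endif
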
#ifdef CONFIG_PROVE_RCU
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#endif

#endif /* __LINUX_LOCKDEP_H */