
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth
 * subclass are cached in lockdep_map. This optimization mainly
 * targets rq->lock: double_rq_lock() acquires this highly contended
 * lock with the single-depth subclass.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
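
/*
 * Illustrative sketch, not part of this header: a subsystem that wants
 * its own lock-class declares a static key and binds a lock instance
 * to it ("my_driver_lock_key" and "my_dev" are made-up names):
 *
 *	static struct lock_class_key my_driver_lock_key;
 *
 *	spin_lock_init(&my_dev->lock);
 *	lockdep_set_class(&my_dev->lock, &my_driver_lock_key);
 *
 * Because the key is a static object, its address is unique, and that
 * address is what identifies the class.
 */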

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};
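
/*
 * Illustrative sketch, not part of this header: lock primitives embed
 * a lockdep_map, conventionally named dep_map, next to the raw lock
 * ("my_lock" is a made-up type following that convention):
 *
 *	struct my_lock {
 *		arch_spinlock_t		raw_lock;
 *	#ifdef CONFIG_DEBUG_LOCK_ALLOC
 *		struct lockdep_map	dep_map;
 *	#endif
 *	};
 *
 * All the lockdep_* macros and the acquire/release wrappers below
 * operate on that embedded dep_map.
 */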

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and the
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:1;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:12;					/* 32 bits */
};
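
/*
 * Sketch of the chain-key hashing described above (simplified, not the
 * exact kernel function): each acquired class index is folded into the
 * running 64-bit hash, so distinct acquisition sequences get distinct
 * chain keys with high probability:
 *
 *	static inline u64 chain_key_step(u64 key, u32 class_idx)
 *	{
 *		key ^= class_idx;
 *		return (key << 24) | (key >> 40);	// rotate-left by 24
 *	}
 */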

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
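
/*
 * Illustrative use, with made-up names: a statically allocated object
 * can initialize its map at build time:
 *
 *	static struct lock_class_key my_static_key;
 *	static struct lockdep_map my_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_map", &my_static_key);
 */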

/*
 * Reinitialize a lock key - for cases where special locking or special
 * initialization of locks causes the validator to get the scope of
 * dependencies wrong: they are either too broad (they need a class-split)
 * or too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
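
/*
 * Illustrative sketch, with made-up names: if two locks of one type
 * play different roles and the shared class produces false reports,
 * split the class by giving one instance its own key:
 *
 *	static struct lock_class_key nested_key;
 *
 *	mutex_init(&parent->lock);
 *	mutex_init(&child->lock);
 *	lockdep_set_class(&child->lock, &nested_key);
 */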

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
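
/*
 * Illustrative use, with made-up names: check whether a lock instance
 * currently belongs to a given class:
 *
 *	if (lockdep_match_class(&dev->lock, &my_driver_lock_key))
 *		...
 */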

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
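
/*
 * Illustrative pairing (simplified; real primitives use the wrapper
 * macros further down): an exclusive, fully validated acquire and its
 * matching release look like
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *	...
 *	lock_release(&lock->dep_map, 0, _RET_IP_);
 */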

#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	do {				\
		WARN_ON(debug_locks && !lockdep_is_held(l));	\
	} while (0)
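
/*
 * Illustrative use, with made-up names: assert that the caller holds
 * the required lock; this compiles away unless lockdep is enabled:
 *
 *	static void update_stats(struct my_dev *dev)
 *	{
 *		lockdep_assert_held(&dev->lock);
 *		dev->stat_count++;
 *	}
 */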

#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case, since the result is not well defined and the caller
 * should rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit() 			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)			do { (void)(l); } while (0)

#define lockdep_recursing(tsk)			(0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)
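
/*
 * Illustrative expansion, with made-up mutex-style helpers:
 *
 *	LOCK_CONTENDED(lock, my_mutex_trylock, my_mutex_lock_slowpath);
 *
 * tries the fast path first; only if that fails does lock_contended()
 * record where waiting began, and lock_acquired() then records when
 * the lock was finally taken, attributing the wait to this call site.
 */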

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
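
/*
 * Illustrative use (a common kernel idiom): when two locks of the same
 * class are taken in a fixed order, annotate the inner acquisition so
 * lockdep does not report a false self-deadlock:
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */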

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif
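
/*
 * Illustrative sketch (simplified from the real spinlock wiring, with
 * made-up names): a primitive calls the acquire hook before taking the
 * raw lock and the release hook before dropping it:
 *
 *	static inline void my_spin_lock(my_spinlock_t *lock)
 *	{
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		do_raw_spin_lock(lock);
 *	}
 *
 *	static inline void my_spin_unlock(my_spinlock_t *lock)
 *	{
 *		spin_release(&lock->dep_map, 1, _RET_IP_);
 *		do_raw_spin_unlock(lock);
 *	}
 */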

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define mutex_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_acquire_nest(l, s, t, n, i)	do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwsem_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_nest(l, s, t, n, i)	do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
# else
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)			do { } while (0)
# define lock_map_acquire_read(l)		do { } while (0)
# define lock_map_release(l)			do { } while (0)
#endif

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
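
/*
 * Illustrative use, with made-up names: a function that only takes a
 * lock on a rare path can still teach lockdep the dependency on every
 * call:
 *
 *	static void my_flush(struct my_dev *dev)
 *	{
 *		might_lock(&dev->lock);
 *
 *		if (dev->dirty) {
 *			mutex_lock(&dev->lock);
 *			do_flush(dev);
 *			mutex_unlock(&dev->lock);
 *		}
 *	}
 */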

#ifdef CONFIG_PROVE_RCU
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#endif

#endif /* __LINUX_LOCKDEP_H */