/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES           (1+3*4)

#define MAX_LOCKDEP_SUBCLASSES          8UL

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires the highly contended rq->lock at single
 * depth as well.
 */
#define NR_LOCKDEP_CACHING_CLASSES      2

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
        char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
        struct lockdep_subclass_key     subkeys[MAX_LOCKDEP_SUBCLASSES];
};
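
/*
 * Usage sketch (illustrative, not part of this header): a subsystem can
 * give a group of locks a class of their own by defining a key and
 * attaching it after init, using lockdep_set_class() defined below.
 * The names are hypothetical.
 *
 *      static struct lock_class_key mydev_lock_key;
 *
 *      spin_lock_init(&dev->lock);
 *      lockdep_set_class(&dev->lock, &mydev_lock_key);
 */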

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS         4

/*
 * The lock-class itself:
 */
struct lock_class {
        /*
         * class-hash:
         */
        struct list_head                hash_entry;

        /*
         * global list of all lock-classes:
         */
        struct list_head                lock_entry;

        struct lockdep_subclass_key     *key;
        unsigned int                    subclass;
        unsigned int                    dep_gen_id;

        /*
         * IRQ/softirq usage tracking bits:
         */
        unsigned long                   usage_mask;
        struct stack_trace              usage_traces[XXX_LOCK_USAGE_STATES];

        /*
         * These fields represent a directed graph of lock dependencies,
         * to every node we attach a list of "forward" and a list of
         * "backward" graph nodes.
         */
        struct list_head                locks_after, locks_before;

        /*
         * Generation counter, when doing certain classes of graph walking,
         * to ensure that we check one node only once:
         */
        unsigned int                    version;

        /*
         * Statistics counter:
         */
        unsigned long                   ops;

        const char                      *name;
        int                             name_version;

#ifdef CONFIG_LOCK_STAT
        unsigned long                   contention_point[LOCKSTAT_POINTS];
        unsigned long                   contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
        s64                             min;
        s64                             max;
        s64                             total;
        unsigned long                   nr;
};

enum bounce_type {
        bounce_acquired_write,
        bounce_acquired_read,
        bounce_contended_write,
        bounce_contended_read,
        nr_bounce_types,

        bounce_acquired = bounce_acquired_write,
        bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
        unsigned long                   contention_point[LOCKSTAT_POINTS];
        unsigned long                   contending_point[LOCKSTAT_POINTS];
        struct lock_time                read_waittime;
        struct lock_time                write_waittime;
        struct lock_time                read_holdtime;
        struct lock_time                write_holdtime;
        unsigned long                   bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
        struct lock_class_key           *key;
        struct lock_class               *class_cache[NR_LOCKDEP_CACHING_CLASSES];
        const char                      *name;
#ifdef CONFIG_LOCK_STAT
        int                             cpu;
        unsigned long                   ip;
#endif
};

static inline void lockdep_copy_map(struct lockdep_map *to,
                                    struct lockdep_map *from)
{
        int i;

        *to = *from;
        /*
         * Since the class cache can be modified concurrently we could observe
         * half pointers (64bit arch using 32bit copy insns). Therefore clear
         * the caches and take the performance hit.
         *
         * XXX it doesn't work well with lockdep_set_class_and_subclass(), since
         *     that relies on cache abuse.
         */
        for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
                to->class_cache[i] = NULL;
}

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
        struct list_head                entry;
        struct lock_class               *class;
        struct stack_trace              trace;
        int                             distance;

        /*
         * The parent field is used to implement breadth-first search, and the
         * bit 0 is reused to indicate if the lock has been accessed in BFS.
         */
        struct lock_list                *parent;
};
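
/*
 * A minimal sketch (assumed helpers, not the exact code from
 * kernel/locking/lockdep.c) of how bit 0 of ->parent can double as the
 * BFS "accessed" mark while the remaining bits still hold the pointer:
 *
 *      static inline void mark_lock_accessed(struct lock_list *lock,
 *                                            struct lock_list *parent)
 *      {
 *              lock->parent = (void *)((unsigned long)parent | 1UL);
 *      }
 *
 *      static inline struct lock_list *get_lock_parent(struct lock_list *lock)
 *      {
 *              return (void *)((unsigned long)lock->parent & ~1UL);
 *      }
 */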

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
        u8                              irq_context;
        u8                              depth;
        u16                             base;
        struct list_head                entry;
        u64                             chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS           13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS                ((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
        /*
         * One-way hash of the dependency chain up to this point. We
         * hash the hashes step by step as the dependency chain grows.
         *
         * We use it for dependency-caching and we skip detection
         * passes and dependency-updates if there is a cache-hit, so
         * it is absolutely critical for 100% coverage of the validator
         * to have a unique key value for every unique dependency path
         * that can occur in the system, to make a unique hash value
         * as likely as possible - hence the 64-bit width.
         *
         * The task struct holds the current hash value (initialized
         * with zero), here we store the previous hash value:
         */
        u64                             prev_chain_key;
        unsigned long                   acquire_ip;
        struct lockdep_map              *instance;
        struct lockdep_map              *nest_lock;
#ifdef CONFIG_LOCK_STAT
        u64                             waittime_stamp;
        u64                             holdtime_stamp;
#endif
        unsigned int                    class_idx:MAX_LOCKDEP_KEYS_BITS;
        /*
         * The lock-stack is unified in that the lock chains of interrupt
         * contexts nest on top of process context chains, but we 'separate'
         * the hashes by starting with 0 if we cross into an interrupt
         * context, and we also do not add cross-context lock
         * dependencies - the lock usage graph walking covers that area
         * anyway, and we'd just unnecessarily increase the number of
         * dependencies otherwise. [Note: hardirq and softirq contexts
         * are separated from each other too.]
         *
         * The following field is used to detect when we cross into an
         * interrupt context:
         */
        unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
        unsigned int trylock:1;                                         /* 16 bits */

        unsigned int read:2;        /* see lock_acquire() comment */
        unsigned int check:1;       /* see lock_acquire() comment */
        unsigned int hardirqs_off:1;
        unsigned int references:12;                                     /* 32 bits */
        unsigned int pin_count;
};
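
/*
 * A rough sketch of the chain-key idea described above. The real mixing
 * function lives in kernel/locking/lockdep.c; the shift/xor below is an
 * illustration of the scheme, not the exact hash:
 *
 *      u64 chain_key = 0;                   // per task, starts at zero
 *
 *      // for each lock acquired, in order:
 *      hlock->prev_chain_key = chain_key;
 *      chain_key = (chain_key << MAX_LOCKDEP_KEYS_BITS) ^ hlock->class_idx;
 */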

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern asmlinkage void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
                             struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }
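
/*
 * For example (names hypothetical):
 *
 *      static struct lock_class_key my_key;
 *      static struct lockdep_map my_dep_map =
 *              STATIC_LOCKDEP_MAP_INIT("my_dep_map", &my_key);
 */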

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
                lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
                lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
                lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
                lockdep_init_map(&(lock)->dep_map, #lock, \
                                 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
        lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
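
/*
 * For instance (a hedged sketch, names hypothetical): if two
 * independently-used locks were initialized by common code and therefore
 * share one class, a caller can split them apart:
 *
 *      static struct lock_class_key outer_key, inner_key;
 *
 *      lockdep_set_class_and_name(&a->lock, &outer_key, "a->lock");
 *      lockdep_set_class_and_name(&b->lock, &inner_key, "b->lock");
 */
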
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
                                    struct lock_class_key *key)
{
        return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                         int trylock, int read, int check,
                         struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
                         unsigned long ip);
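
/*
 * Illustrative call sequence for an exclusive acquire with full
 * validation (real lock types use the spin_acquire()/mutex_acquire()
 * wrappers defined later in this file rather than calling these
 * directly):
 *
 *      lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
 *      ... critical section ...
 *      lock_release(&lock->dep_map, 0, _RET_IP_);
 */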

#define lockdep_is_held(lock)   lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
                           struct lock_class_key *key, unsigned int subclass,
                           unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
                unsigned int subclass, unsigned long ip)
{
        lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

extern void lock_pin_lock(struct lockdep_map *lock);
extern void lock_unpin_lock(struct lockdep_map *lock);

# define INIT_LOCKDEP                           .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)      (debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)  do {                            \
                WARN_ON(debug_locks && !lockdep_is_held(l));    \
        } while (0)

#define lockdep_assert_held_once(l)     do {                            \
                WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));       \
        } while (0)
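
/*
 * Typical use (illustrative, names hypothetical): a helper that relies
 * on its caller holding a lock can enforce that contract:
 *
 *      static void my_update(struct my_dev *dev)
 *      {
 *              lockdep_assert_held(&dev->lock);
 *              ...
 *      }
 */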

#define lockdep_recursing(tsk)  ((tsk)->lockdep_recursion)

#define lockdep_pin_lock(l)             lock_pin_lock(&(l)->dep_map)
#define lockdep_unpin_lock(l)           lock_unpin_lock(&(l)->dep_map)

#else /* !CONFIG_LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)      do { } while (0)
# define lock_release(l, n, i)                  do { } while (0)
# define lock_set_class(l, n, k, s, i)          do { } while (0)
# define lock_set_subclass(l, s, i)             do { } while (0)
# define lockdep_set_current_reclaim_state(g)   do { } while (0)
# define lockdep_clear_current_reclaim_state()  do { } while (0)
# define lockdep_trace_alloc(g)                 do { } while (0)
# define lockdep_init()                         do { } while (0)
# define lockdep_info()                         do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
                do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)           do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
                do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
                do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)         do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case since the result is not well defined and the caller
 * should rather #ifdef the call themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()                do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)    do { } while (0)
# define lockdep_sys_exit()                     do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)      (0)

#define lockdep_assert_held(l)                  do { (void)(l); } while (0)
#define lockdep_assert_held_once(l)             do { (void)(l); } while (0)

#define lockdep_recursing(tsk)                  (0)

#define lockdep_pin_lock(l)                     do { (void)(l); } while (0)
#define lockdep_unpin_lock(l)                   do { (void)(l); } while (0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)                        \
do {                                                            \
        if (!try(_lock)) {                                      \
                lock_contended(&(_lock)->dep_map, _RET_IP_);    \
                lock(_lock);                                    \
        }                                                       \
        lock_acquired(&(_lock)->dep_map, _RET_IP_);             \
} while (0)
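
/*
 * For example, a sleeping lock built around a trylock fast path can
 * report contention and acquisition like this (a sketch modelled on the
 * kernel's rwsem code; the helper names are assumptions):
 *
 *      LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
 */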

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
        lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
        lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_TRACE_IRQFLAGS
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING                    1
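
/*
 * E.g. when locking both ends of a parent/child pair of the same class
 * (a sketch; assumes the _nested() variant of the locking primitive):
 *
 *      mutex_lock(&parent->mutex);
 *      mutex_lock_nested(&child->mutex, SINGLE_DEPTH_NESTING);
 */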

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#define lock_acquire_exclusive(l, s, t, n, i)           lock_acquire(l, s, t, 0, 1, n, i)
#define lock_acquire_shared(l, s, t, n, i)              lock_acquire(l, s, t, 1, 1, n, i)
#define lock_acquire_shared_recursive(l, s, t, n, i)    lock_acquire(l, s, t, 2, 1, n, i)

#define spin_acquire(l, s, t, i)                lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i)        lock_acquire_exclusive(l, s, t, n, i)
#define spin_release(l, n, i)                   lock_release(l, n, i)

#define rwlock_acquire(l, s, t, i)              lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i)         lock_acquire_shared_recursive(l, s, t, NULL, i)
#define rwlock_release(l, n, i)                 lock_release(l, n, i)

#define seqcount_acquire(l, s, t, i)            lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i)       lock_acquire_shared_recursive(l, s, t, NULL, i)
#define seqcount_release(l, n, i)               lock_release(l, n, i)

#define mutex_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define mutex_release(l, n, i)                  lock_release(l, n, i)

#define rwsem_acquire(l, s, t, i)               lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i)       lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i)          lock_acquire_shared(l, s, t, NULL, i)
#define rwsem_release(l, n, i)                  lock_release(l, n, i)

#define lock_map_acquire(l)                     lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l)                lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l)             lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
#define lock_map_release(l)                     lock_release(l, 1, _THIS_IP_)
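
/*
 * The lock_map_*() helpers let non-lock code feed pseudo-lock
 * dependencies to lockdep (the workqueue code uses this technique).
 * A sketch, with hypothetical names:
 *
 *      static struct lock_class_key my_work_key;
 *      static struct lockdep_map my_work_map =
 *              STATIC_LOCKDEP_MAP_INIT("my_work_map", &my_work_key);
 *
 *      lock_map_acquire(&my_work_map);
 *      ... run the work item ...
 *      lock_map_release(&my_work_map);
 */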

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)                                               \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
} while (0)
# define might_lock_read(lock)                                          \
do {                                                                    \
        typecheck(struct lockdep_map *, &(lock)->dep_map);              \
        lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);    \
        lock_release(&(lock)->dep_map, 0, _THIS_IP_);                   \
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
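
/*
 * might_lock()/might_lock_read() annotate paths that may take a lock
 * without actually taking it, so lockdep sees the dependency even on
 * runs where the lock is skipped. Illustrative use:
 *
 *      might_lock_read(&mm->mmap_sem);
 */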

#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif

#endif /* __LINUX_LOCKDEP_H */