/*
 * Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance
 *
 * Original mutex implementation started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Wound/wait implementation:
 *  Copyright (C) 2013 Canonical Ltd.
 *
 * This file contains the main data structure and API definitions.
 */

#ifndef __LINUX_WW_MUTEX_H
#define __LINUX_WW_MUTEX_H

#include <linux/mutex.h>
#include <syscall.h>

/* KolibriOS: the current PID, cast to a pointer, stands in for the Linux "current" task pointer. */
#define current ((void*)GetPid())

struct ww_class {
        atomic_long_t stamp;
        struct lock_class_key acquire_key;
        struct lock_class_key mutex_key;
        const char *acquire_name;
        const char *mutex_name;
};

struct ww_acquire_ctx {
        struct task_struct *task;
        unsigned long stamp;
        unsigned acquired;
#ifdef CONFIG_DEBUG_MUTEXES
        unsigned done_acquire;
        struct ww_class *ww_class;
        struct ww_mutex *contending_lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
        unsigned deadlock_inject_interval;
        unsigned deadlock_inject_countdown;
#endif
};

struct ww_mutex {
        struct mutex base;
        struct ww_acquire_ctx *ctx;
#ifdef CONFIG_DEBUG_MUTEXES
        struct ww_class *ww_class;
#endif
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
                , .ww_class = &ww_class
#else
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
#endif

#define __WW_CLASS_INITIALIZER(ww_class) \
                { .stamp = ATOMIC_LONG_INIT(0) \
                , .acquire_name = #ww_class "_acquire" \
                , .mutex_name = #ww_class "_mutex" }

#define __WW_MUTEX_INITIALIZER(lockname, class) \
                { .base = __MUTEX_INITIALIZER(lockname.base) \
                __WW_CLASS_MUTEX_INITIALIZER(lockname, class) }

#define DEFINE_WW_CLASS(classname) \
        struct ww_class classname = __WW_CLASS_INITIALIZER(classname)

#define DEFINE_WW_MUTEX(mutexname, ww_class) \
        struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)

/**
 * ww_mutex_init - initialize the w/w mutex
 * @lock: the mutex to be initialized
 * @ww_class: the w/w class the mutex should belong to
 *
 * Initialize the w/w mutex to unlocked state and associate it with the given
 * class.
 *
 * It is not allowed to initialize an already locked mutex.
 */
static inline void ww_mutex_init(struct ww_mutex *lock,
                                 struct ww_class *ww_class)
{
        MutexInit(&lock->base);
        lock->ctx = NULL;
#ifdef CONFIG_DEBUG_MUTEXES
        lock->ww_class = ww_class;
#endif
}

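/*
 * Usage sketch (not part of the upstream header): how a w/w class and its
 * mutexes are typically set up, assuming the hypothetical names
 * reservation_class, my_buffer and my_buffer_init.
 *
 *   DEFINE_WW_CLASS(reservation_class);
 *
 *   struct my_buffer {
 *           struct ww_mutex lock;
 *   };
 *
 *   static void my_buffer_init(struct my_buffer *buf)
 *   {
 *           // Associate the per-object mutex with the shared class at runtime.
 *           ww_mutex_init(&buf->lock, &reservation_class);
 *   }
 */
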
/**
 * ww_acquire_init - initialize a w/w acquire context
 * @ctx: w/w acquire context to initialize
 * @ww_class: w/w class of the context
 *
 * Initializes a context to acquire multiple mutexes of the given w/w class.
 *
 * Context-based w/w mutex acquiring can be done in any order whatsoever within
 * a given lock class. Deadlocks will be detected and handled with the
 * wait/wound logic.
 *
 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
 * result in undetected deadlocks and is hence forbidden. Mixing different
 * contexts for the same w/w class when acquiring mutexes can also result in
 * undetected deadlocks, and is hence also forbidden. Both types of abuse will
 * be caught by enabling CONFIG_PROVE_LOCKING.
 *
 * Nesting of acquire contexts for _different_ w/w classes is possible, subject
 * to the usual locking rules between different lock classes.
 *
 * An acquire context must be released with ww_acquire_fini by the same task
 * before the memory is freed. It is recommended to allocate the context itself
 * on the stack.
 */
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
                                   struct ww_class *ww_class)
{
        ctx->task = current;
        ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
        ctx->acquired = 0;
#ifdef CONFIG_DEBUG_MUTEXES
        ctx->ww_class = ww_class;
        ctx->done_acquire = 0;
        ctx->contending_lock = NULL;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
        lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
                         &ww_class->acquire_key, 0);
        mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
        ctx->deadlock_inject_interval = 1;
        ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
#endif
}

/**
 * ww_acquire_done - marks the end of the acquire phase
 * @ctx: the acquire context
 *
 * Marks the end of the acquire phase; any further w/w mutex lock calls using
 * this context are forbidden.
 *
 * Calling this function is optional. It is just useful to document w/w mutex
 * code and to clearly separate the acquire phase from actually using the
 * locked data structures.
 */
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
        lockdep_assert_held(ctx);

        DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
        ctx->done_acquire = 1;
#endif
}

/**
 * ww_acquire_fini - releases a w/w acquire context
 * @ctx: the acquire context to free
 *
 * Releases a w/w acquire context. This must be called _after_ all acquired w/w
 * mutexes have been released with ww_mutex_unlock.
 */
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
        mutex_release(&ctx->dep_map, 0, _THIS_IP_);

        DEBUG_LOCKS_WARN_ON(ctx->acquired);
        if (!config_enabled(CONFIG_PROVE_LOCKING))
                /*
                 * lockdep will normally handle this,
                 * but fail without it anyway
                 */
                ctx->done_acquire = 1;

        if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
                /* ensure ww_acquire_fini will still fail if called twice */
                ctx->acquired = ~0U;
#endif
}

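/*
 * Lifecycle sketch (not part of the upstream header): the intended pairing of
 * ww_acquire_init/ww_acquire_done/ww_acquire_fini around a group of locks.
 * The names reservation_class, obj_a and obj_b are hypothetical; wound-case
 * (-EDEADLK) handling is omitted here and shown in the backoff sketch after
 * ww_mutex_lock_slow below.
 *
 *   static void lock_and_use(struct ww_mutex *obj_a, struct ww_mutex *obj_b)
 *   {
 *           struct ww_acquire_ctx ctx;
 *
 *           ww_acquire_init(&ctx, &reservation_class);
 *
 *           // Locks of one class may be taken in any order under one context.
 *           ww_mutex_lock(obj_a, &ctx);
 *           ww_mutex_lock(obj_b, &ctx);
 *           ww_acquire_done(&ctx);            // optional: acquire phase is over
 *
 *           // ... use the data protected by obj_a and obj_b ...
 *
 *           ww_mutex_unlock(obj_b);
 *           ww_mutex_unlock(obj_a);
 *           ww_acquire_fini(&ctx);            // only after all locks are dropped
 *   }
 */
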
extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
                                        struct ww_acquire_ctx *ctx);
extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
                                                      struct ww_acquire_ctx *ctx);

/**
 * ww_mutex_lock - acquire the w/w mutex
 * @lock: the mutex to be acquired
 * @ctx: w/w acquire context, or NULL to acquire only a single lock.
 *
 * Lock the w/w mutex exclusively for this task.
 *
 * Deadlocks within a given w/w class of locks are detected and handled with the
 * wait/wound algorithm. If the lock isn't immediately available, this function
 * will either sleep until it is (wait case) or select the current context for
 * backing off by returning -EDEADLK (wound case). Trying to acquire the
 * same lock with the same context twice is also detected and signalled by
 * returning -EALREADY. Returns 0 if the mutex was successfully acquired.
 *
 * In the wound case the caller must release all currently held w/w mutexes for
 * the given context and then wait for this contending lock to be available by
 * calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
 * lock and proceed with trying to acquire further w/w mutexes (e.g. when
 * scanning through lru lists trying to free resources).
 *
 * The mutex must later on be released by the same task that
 * acquired it. The task may not exit without first unlocking the mutex. Also,
 * kernel memory where the mutex resides must not be freed with the mutex still
 * locked. The mutex must first be initialized (or statically defined) before it
 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
 * of the same w/w lock class as was used to initialize the acquire context.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        if (ctx)
                return __ww_mutex_lock(lock, ctx);

        mutex_lock(&lock->base);
        return 0;
}

/**
 * ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
 * @lock: the mutex to be acquired
 * @ctx: w/w acquire context
 *
 * Lock the w/w mutex exclusively for this task.
 *
 * Deadlocks within a given w/w class of locks are detected and handled with the
 * wait/wound algorithm. If the lock isn't immediately available, this function
 * will either sleep until it is (wait case) or select the current context for
 * backing off by returning -EDEADLK (wound case). Trying to acquire the
 * same lock with the same context twice is also detected and signalled by
 * returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
 * signal arrives while waiting for the lock then this function returns -EINTR.
 *
 * In the wound case the caller must release all currently held w/w mutexes for
 * the given context and then wait for this contending lock to be available by
 * calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
 * not acquire this lock and proceed with trying to acquire further w/w mutexes
 * (e.g. when scanning through lru lists trying to free resources).
 *
 * The mutex must later on be released by the same task that
 * acquired it. The task may not exit without first unlocking the mutex. Also,
 * kernel memory where the mutex resides must not be freed with the mutex still
 * locked. The mutex must first be initialized (or statically defined) before it
 * can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
 * of the same w/w lock class as was used to initialize the acquire context.
 *
 * A mutex acquired with this function must be released with ww_mutex_unlock.
 */
static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
                                                           struct ww_acquire_ctx *ctx)
{
        if (ctx)
                return __ww_mutex_lock_interruptible(lock, ctx);
        else
                return mutex_lock_interruptible(&lock->base);
}

/**
 * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
 * @lock: the mutex to be acquired
 * @ctx: w/w acquire context
 *
 * Acquires a w/w mutex with the given context after a wound case. This function
 * will sleep until the lock becomes available.
 *
 * The caller must have released all w/w mutexes already acquired with the
 * context and then call this function on the contended lock.
 *
 * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
 * needs with ww_mutex_lock. Note that the -EALREADY return code from
 * ww_mutex_lock can be used to avoid locking this contended mutex twice.
 *
 * It is forbidden to call this function with any other w/w mutexes associated
 * with the context held. It is forbidden to call this on anything other than
 * the contending mutex.
 *
 * Note that the slowpath lock acquiring can also be done by calling
 * ww_mutex_lock directly. This function here is simply to help w/w mutex
 * locking code readability by clearly denoting the slowpath.
 */
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;
#ifdef CONFIG_DEBUG_MUTEXES
        DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
        ret = ww_mutex_lock(lock, ctx);
        (void)ret;
}

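/*
 * Backoff sketch (not part of the upstream header): the retry pattern that the
 * ww_mutex_lock documentation above describes for the wound case, written out
 * for two locks. The names obj_a and obj_b are hypothetical, and ctx is assumed
 * to have been set up with ww_acquire_init by the caller.
 *
 *   static void lock_both(struct ww_mutex *obj_a, struct ww_mutex *obj_b,
 *                         struct ww_acquire_ctx *ctx)
 *   {
 *           struct ww_mutex *contended = NULL;
 *           int ret;
 *
 *   retry:
 *           if (contended)
 *                   ww_mutex_lock_slow(contended, ctx);  // wait with nothing held
 *
 *           if (contended != obj_a) {
 *                   ret = ww_mutex_lock(obj_a, ctx);
 *                   if (ret == -EDEADLK) {
 *                           // Wounded: drop everything before waiting.
 *                           if (contended)
 *                                   ww_mutex_unlock(contended);
 *                           contended = obj_a;
 *                           goto retry;
 *                   }
 *           }
 *           if (contended != obj_b) {
 *                   ret = ww_mutex_lock(obj_b, ctx);
 *                   if (ret == -EDEADLK) {
 *                           ww_mutex_unlock(obj_a);      // only obj_a is held here
 *                           contended = obj_b;
 *                           goto retry;
 *                   }
 *           }
 *           // Both locks held: caller calls ww_acquire_done(ctx), uses the data,
 *           // unlocks both mutexes and finally calls ww_acquire_fini(ctx).
 *   }
 */
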
/**
 * ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible
 * @lock: the mutex to be acquired
 * @ctx: w/w acquire context
 *
 * Acquires a w/w mutex with the given context after a wound case. This function
 * will sleep until the lock becomes available and returns 0 when the lock has
 * been acquired. If a signal arrives while waiting for the lock then this
 * function returns -EINTR.
 *
 * The caller must have released all w/w mutexes already acquired with the
 * context and then call this function on the contended lock.
 *
 * Afterwards the caller may continue to (re)acquire the other w/w mutexes it
 * needs with ww_mutex_lock. Note that the -EALREADY return code from
 * ww_mutex_lock can be used to avoid locking this contended mutex twice.
 *
 * It is forbidden to call this function with any other w/w mutexes associated
 * with the given context held. It is forbidden to call this on anything other
 * than the contending mutex.
 *
 * Note that the slowpath lock acquiring can also be done by calling
 * ww_mutex_lock_interruptible directly. This function here is simply to help
 * w/w mutex locking code readability by clearly denoting the slowpath.
 */
static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
                                 struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
        DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
        return ww_mutex_lock_interruptible(lock, ctx);
}

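/*
 * Sketch (not part of the upstream header): the interruptible variants follow
 * the same backoff pattern as the previous sketch, with -EINTR propagated out
 * after releasing everything. obj and ctx are hypothetical.
 *
 *   ret = ww_mutex_lock_interruptible(obj, &ctx);
 *   if (ret == -EDEADLK) {
 *           // Back off exactly as in the previous sketch, but wait with
 *           // ww_mutex_lock_slow_interruptible(), which can itself return -EINTR.
 *   } else if (ret == -EINTR) {
 *           // A signal arrived: drop all locks held under ctx, call
 *           // ww_acquire_fini(&ctx) and return -EINTR to the caller.
 *   }
 */
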
extern void ww_mutex_unlock(struct ww_mutex *lock);

/**
 * ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
 * @lock: mutex to lock
 *
 * Trylocks a mutex without acquire context, so no deadlock detection is
 * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
 */
static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
{
        return mutex_trylock(&lock->base);
}

/**
 * ww_mutex_destroy - mark a w/w mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
static inline void ww_mutex_destroy(struct ww_mutex *lock)
{
        mutex_destroy(&lock->base);
}

/**
 * ww_mutex_is_locked - is the w/w mutex locked
 * @lock: the mutex to be queried
 *
 * Returns 1 if the mutex is locked, 0 if unlocked.
 */
static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
{
        return mutex_is_locked(&lock->base);
}

#endif