/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <syscall.h>

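/*
 * Owner tracking is not used in this port (mutex_clear_owner() is disabled
 * in __mutex_init() below), so this helper is intentionally a no-op.
 */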
static inline void mutex_set_owner(struct mutex *lock)
{
}

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex) (atomic_read(&(mutex)->count) >= 0)

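/*
 * Initialise a mutex to the unlocked state (count == 1) with an empty
 * wait list.
 */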
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
    atomic_set(&lock->count, 1);
//    spin_lock_init(&lock->wait_lock);
    INIT_LIST_HEAD(&lock->wait_list);
//    mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    lock->osq = NULL;
#endif
}

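/*
 * Deadlock-avoidance check for a contended ww_mutex: returns -EALREADY if
 * the caller already holds the lock with this acquire context, -EDEADLK if
 * the lock is held by a context with an older stamp (so the caller must
 * back off), and 0 otherwise.
 */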
static inline int __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

        if (!hold_ctx)
                return 0;

        if (unlikely(ctx == hold_ctx))
                return -EALREADY;

        if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
            (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
                return -EDEADLK;
        }

        return 0;
}

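/*
 * Account one more acquired ww_mutex on the given acquire context.
 */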
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
                           struct ww_acquire_ctx *ww_ctx)
{
    ww_ctx->acquired++;
}

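/*
 * Release a wound/wait mutex: detach the acquire context (decrementing its
 * acquired count) and unlock the underlying mutex via MutexUnlock().
 */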
void ww_mutex_unlock(struct ww_mutex *lock)
{
    /*
     * The unlocking fastpath is the 0->1 transition from 'locked'
     * into 'unlocked' state:
     */
    if (lock->ctx) {
            if (lock->ctx->acquired > 0)
                    lock->ctx->acquired--;
            lock->ctx = NULL;
    }
    MutexUnlock(&lock->base);
}

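/*
 * Fastpath lock attempt: atomically decrement the count and return 0 on
 * success, or -1 if the mutex is contended and the slowpath is needed.
 */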
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
{
    if (unlikely(atomic_dec_return(count) < 0))
        return -1;
    else
        return 0;
}

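/*
 * After a fastpath acquisition, publish the acquire context and, if the
 * lock became contended in the meantime, wake every waiter so it can see
 * the new lock->ctx.
 */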
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
                               struct ww_acquire_ctx *ctx)
{
    u32 flags;
    struct mutex_waiter *cur;

    ww_mutex_lock_acquired(lock, ctx);

    lock->ctx = ctx;

    /*
     * The lock->ctx update should be visible on all cores before
     * the atomic read is done, otherwise contended waiters might be
     * missed. Contended waiters will either see ww_ctx == NULL and
     * keep spinning, or they will acquire the wait_lock, add
     * themselves to the wait list and sleep.
     */
    smp_mb(); /* ^^^ */

    /*
     * Check if the lock is contended; if not, there is nobody to wake up.
     */
    if (likely(atomic_read(&lock->base.count) == 0))
            return;

    /*
     * Uh oh, we raced in the fastpath; wake up everyone in this case
     * so they can see the new lock->ctx.
     */
    flags = safe_cli();
    list_for_each_entry(cur, &lock->base.wait_list, list) {
        ((struct kos_appdata*)cur->task)->state = KOS_SLOT_STATE_RUNNING;
    }
    safe_sti(flags);
}

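/*
 * Slowpath counterpart of ww_mutex_set_context_fastpath(): publish the
 * acquire context and wake all current waiters.
 */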
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
                              struct ww_acquire_ctx *ctx)
{
    struct mutex_waiter *cur;

    ww_mutex_lock_acquired(lock, ctx);
    lock->ctx = ctx;

    /*
     * Give any possible sleeping processes the chance to wake up,
     * so they can recheck if they have to back off.
     */
    list_for_each_entry(cur, &lock->base.wait_list, list) {
        ((struct kos_appdata*)cur->task)->state = KOS_SLOT_STATE_RUNNING;
    }
}

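/*
 * Slowpath acquisition: with interrupts disabled, put the current task
 * slot on the wait list and retry the lock, suspending the slot between
 * attempts. If the acquire context already holds other locks, the stamp
 * check may make us back off with -EDEADLK or -EALREADY.
 */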
int __ww_mutex_lock_slowpath(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
    struct mutex *lock;
    struct mutex_waiter waiter;
    struct kos_appdata *appdata;
    u32 eflags;
    int ret = 0;

    lock = &ww->base;
    appdata = GetCurrSlot();
    waiter.task = appdata;

    eflags = safe_cli();

    list_add_tail(&waiter.list, &lock->wait_list);

    for (;;)
    {
        if (atomic_xchg(&lock->count, -1) == 1)
            break;

        if (ctx->acquired > 0) {
            ret = __ww_mutex_lock_check_stamp(lock, ctx);
            if (ret)
                goto err;
        }
        appdata->state = KOS_SLOT_STATE_SUSPENDED;
        change_task();
    }

    if (likely(list_empty(&lock->wait_list)))
        atomic_set(&lock->count, 0);

    ww_mutex_set_context_slowpath(ww, ctx);

err:
    list_del(&waiter.list);
    safe_sti(eflags);

    return ret;
}

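/*
 * Acquire a wound/wait mutex: try the fastpath first and fall back to the
 * slowpath on contention.
 */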
int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
    int ret;

    ret = __mutex_fastpath_lock_retval(&lock->base.count);

    if (likely(!ret)) {
            ww_mutex_set_context_fastpath(lock, ctx);
            mutex_set_owner(&lock->base);
    } else
            ret = __ww_mutex_lock_slowpath(lock, ctx);
    return ret;
}

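/*
 * Interruptible variant of __ww_mutex_lock(). In this port the slowpath
 * has no signal handling, so it behaves identically to __ww_mutex_lock().
 */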
int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
    int ret;

    ret = __mutex_fastpath_lock_retval(&lock->base.count);

    if (likely(!ret)) {
            ww_mutex_set_context_fastpath(lock, ctx);
            mutex_set_owner(&lock->base);
    } else
            ret = __ww_mutex_lock_slowpath(lock, ctx);
    return ret;
}