/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <syscall.h>

/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex) (atomic_read(&(mutex)->count) >= 0)

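/*
 * Illustrative sketch, not part of the original port: the count field
 * follows the usual Linux convention (1 = unlocked, 0 = locked with no
 * waiters, negative = locked with sleepers queued).  The helper below is
 * hypothetical and only restates the macro above in function form.
 */
static inline int mutex_has_waiters(struct mutex *lock)
{
    /* negative count <=> at least one task is queued on wait_list */
    return !MUTEX_SHOW_NO_WAITER(lock);
}
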
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
    atomic_set(&lock->count, 1);
    /* mainline's wait_lock and owner bookkeeping are disabled in this port */
//    spin_lock_init(&lock->wait_lock);
    INIT_LIST_HEAD(&lock->wait_list);
//    mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    lock->osq = NULL;
#endif
}

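/*
 * Example, hypothetical caller (not from the original source): callers
 * normally go through the mutex_init() wrapper from <linux/mutex.h>,
 * which expands to __mutex_init() with a generated name and class key;
 * both arguments only matter when lockdep is compiled in.  This sketch
 * assumes the usual mutex_init()/mutex_lock()/mutex_unlock() helpers
 * are available in this tree's headers.
 */
static void example_mutex_usage(void)
{
    struct mutex example_lock;

    mutex_init(&example_lock);          /* expands to __mutex_init() */
    mutex_lock(&example_lock);
    /* ... critical section ... */
    mutex_unlock(&example_lock);
}
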
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
                           struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
    /*
     * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
     * but released with a normal mutex_unlock in this call.
     *
     * This should never happen, always use ww_mutex_unlock.
     */
    DEBUG_LOCKS_WARN_ON(ww->ctx);

    /*
     * Not quite done after calling ww_acquire_done() ?
     */
    DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

    if (ww_ctx->contending_lock) {
        /*
         * After -EDEADLK you tried to
         * acquire a different ww_mutex? Bad!
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

        /*
         * You called ww_mutex_lock after receiving -EDEADLK,
         * but 'forgot' to unlock everything else first?
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
        ww_ctx->contending_lock = NULL;
    }

    /*
     * Naughty, using a different class will lead to undefined behavior!
     */
    DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
    ww_ctx->acquired++;
}

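/*
 * Illustrative sketch, not part of the original file: the backoff
 * protocol that the contending_lock checks above enforce.  Upstream,
 * when ww_mutex_lock() returns -EDEADLK the caller must unlock every
 * lock held under the context and re-acquire the contended lock first
 * (via ww_mutex_lock_slow() in mainline) before retrying:
 *
 *     ret = ww_mutex_lock(b, &ctx);
 *     if (ret == -EDEADLK) {
 *         ww_mutex_unlock(a);
 *         ww_mutex_lock_slow(b, &ctx);    // sleep until b is ours
 *         ww_mutex_lock(a, &ctx);         // may deadlock-detect again
 *     }
 *
 * In this port MutexLock() blocks unconditionally and __ww_mutex_lock()
 * below always returns 0, so the backoff path can never be taken here.
 */
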
void ww_mutex_unlock(struct ww_mutex *lock)
{
    /*
     * The unlocking fastpath is the 0->1 transition from 'locked'
     * into 'unlocked' state:
     */
    if (lock->ctx) {
        if (lock->ctx->acquired > 0)
            lock->ctx->acquired--;
        lock->ctx = NULL;
    }
    MutexUnlock(&lock->base);
}

/*
 * Note: unlike mainline, a NULL acquire context is not handled here;
 * ww_mutex_lock_acquired() dereferences ctx unconditionally, so every
 * caller must pass a valid ww_acquire_ctx.
 */
int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
    MutexLock(&lock->base);
    ww_mutex_lock_acquired(lock, ctx);
    lock->ctx = ctx;

    return 0;
}

/*
 * In this port the 'interruptible' variant is identical to
 * __ww_mutex_lock() and always returns 0; mainline's -EINTR case
 * cannot occur here.
 */
int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
    MutexLock(&lock->base);
    ww_mutex_lock_acquired(lock, ctx);
    lock->ctx = ctx;

    return 0;
}
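
/*
 * Example, hypothetical caller (not from the original source): the
 * normal wound/wait acquisition pattern for two locks of one class,
 * assuming the standard ww_acquire_init()/ww_acquire_done()/
 * ww_acquire_fini() helpers from <linux/ww_mutex.h> are available in
 * this tree.  No -EDEADLK handling is needed, since __ww_mutex_lock()
 * above always succeeds.
 */
static void example_ww_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
                                 struct ww_class *class)
{
    struct ww_acquire_ctx ctx;

    ww_acquire_init(&ctx, class);

    __ww_mutex_lock(a, &ctx);
    __ww_mutex_lock(b, &ctx);
    ww_acquire_done(&ctx);          /* no further locks will be taken */

    /* ... touch the data protected by both locks ... */

    ww_mutex_unlock(b);             /* always ww_mutex_unlock(), */
    ww_mutex_unlock(a);             /* never a bare mutex_unlock() */
    ww_acquire_fini(&ctx);
}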