Diff: Rev 5056 -> Rev 6082
(The Rev 6082 code is shown below; Rev 5056 code that was removed or replaced is quoted in comments.)
#include
#include
#include
#include
#include
struct kos_taskdata
{
    u32 event_mask;
    u32 pid;
    u16 r0;
    u8  state;
    u8  r1;
    u16 r2;
    u8  wnd_number;
    u8  r3;
    u32 mem_start;
    u32 counter_sum;
    u32 counter_add;
    u32 cpu_usage;
}__attribute__((packed));

static inline void mutex_set_owner(struct mutex *lock)
{
}
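/*
 * Note: both additions above are new in Rev 6082. kos_taskdata appears
 * to mirror the layout of a KolibriOS task slot; the code below only
 * ever touches 'state' (0 = runnable, 1 = sleeping) and the slot's
 * address. mutex_set_owner() is an empty stub because this port does
 * not track mutex owners.
 */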
/*
 * A negative mutex count indicates that waiters are sleeping waiting for the
 * mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex)    (atomic_read(&(mutex)->count) >= 0)
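/*
 * The counter follows the old Linux mutex fastpath convention: 1 means
 * unlocked, 0 locked with no waiters, negative locked with sleepers.
 * A hypothetical helper (illustration only, not part of the file) that
 * spells out the macro's intent:
 */
static inline int mutex_has_sleepers(struct mutex *m)
{
    return !MUTEX_SHOW_NO_WAITER(m);    /* i.e. count < 0 */
}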
/* ... (unchanged lines omitted) ... */

    lock->osq = NULL;
#endif

}
static inline int __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
    struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
    struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

    if (!hold_ctx)
        return 0;

    if (unlikely(ctx == hold_ctx))
        return -EALREADY;

    if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
        (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
        return -EDEADLK;
    }

    return 0;
}
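/*
 * This is the wait/die test of the Linux ww_mutex design: whichever
 * context holds the older (smaller) stamp wins, and the younger one
 * backs off with -EDEADLK; -EALREADY flags locking the same ww_mutex
 * twice with one context. Stamps come from a wrapping counter, so
 * "younger" is decided by unsigned subtraction. A self-contained
 * sketch of that comparison (hypothetical values, not file code):
 */
static int not_older(unsigned long a, unsigned long b)
{
    return a - b <= LONG_MAX;   /* wraparound-safe; ties broken by ctx address above */
}
/*
 * e.g. not_older(5, 3) == 1, and not_older(1, ULONG_MAX) == 1, because
 * stamp 1 was handed out just after the counter wrapped.
 */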
static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
                                                   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
    /*
     * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
     * but released with a normal mutex_unlock in this call.
     *
     * This should never happen, always use ww_mutex_unlock.
     */
    DEBUG_LOCKS_WARN_ON(ww->ctx);

    /*
     * Not quite done after calling ww_acquire_done() ?
     */
    DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

    if (ww_ctx->contending_lock) {
        /*
         * After -EDEADLK you tried to
         * acquire a different ww_mutex? Bad!
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

        /*
         * You called ww_mutex_lock after receiving -EDEADLK,
         * but 'forgot' to unlock everything else first?
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
        ww_ctx->contending_lock = NULL;
    }

    /*
     * Naughty, using a different class will lead to undefined behavior!
     */
/* ... (unchanged lines omitted) ... */

        lock->ctx = NULL;
    }
    MutexUnlock(&lock->base);
}
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
{
    if (unlikely(atomic_dec_return(count) < 0))
        return -1;
    else
        return 0;
}
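/*
 * A single atomic decrement: 1 -> 0 takes a free lock; any negative
 * result means contention, so __ww_mutex_lock() below falls back to
 * the slowpath. This mirrors the old Linux __mutex_fastpath_lock_retval().
 */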
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
                              struct ww_acquire_ctx *ctx)
{
    u32 flags;
    struct mutex_waiter *cur;

    ww_mutex_lock_acquired(lock, ctx);

    lock->ctx = ctx;

    /*
     * The lock->ctx update should be visible on all cores before
     * the atomic read is done, otherwise contended waiters might be
     * missed. The contended waiters will either see ww_ctx == NULL
     * and keep spinning, or it will acquire wait_lock, add itself
     * to waiter list and sleep.
     */
    smp_mb(); /* ^^^ */

    /*
     * Check if lock is contended, if not there is nobody to wake up
     */
    if (likely(atomic_read(&lock->base.count) == 0))
        return;

    /*
     * Uh oh, we raced in fastpath, wake up everyone in this case,
     * so they can see the new lock->ctx.
     */
    flags = safe_cli();
    list_for_each_entry(cur, &lock->base.wait_list, list) {
        ((struct kos_taskdata*)cur->task)->state = 0;
    }
    safe_sti(flags);
}
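/*
 * Where Linux takes lock->base.wait_lock around the wait-list walk,
 * this port brackets it with safe_cli()/safe_sti(), i.e. it saves the
 * interrupt flag, disables interrupts, and restores the flag afterwards.
 */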
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
                              struct ww_acquire_ctx *ctx)
{
    struct mutex_waiter *cur;

    ww_mutex_lock_acquired(lock, ctx);
    lock->ctx = ctx;

    /*
     * Give any possible sleeping processes the chance to wake up,
     * so they can recheck if they have to back off.
     */
    list_for_each_entry(cur, &lock->base.wait_list, list) {
        ((struct kos_taskdata*)cur->task)->state = 0;
    }
}
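/*
 * Unlike the fastpath variant above, this one needs neither smp_mb()
 * nor its own interrupt bracket: its only caller,
 * __ww_mutex_lock_slowpath() below, already runs the whole acquisition
 * under safe_cli().
 */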
int __ww_mutex_lock_slowpath(struct ww_mutex *ww, struct ww_acquire_ctx *ctx)
{
    struct mutex *lock;
    struct mutex_waiter waiter;
    struct kos_taskdata* taskdata;
    u32 eflags;
    int ret = 0;

    lock = &ww->base;
    taskdata = (struct kos_taskdata*)(0x80003010);
    waiter.task = (u32*)taskdata;

    eflags = safe_cli();

    list_add_tail(&waiter.list, &lock->wait_list);

    for (;;)
    {
        if (atomic_xchg(&lock->count, -1) == 1)
            break;

        if (ctx->acquired > 0) {
            ret = __ww_mutex_lock_check_stamp(lock, ctx);
            if (ret)
                goto err;
        }

        taskdata->state = 1;
        change_task();
    }

    if (likely(list_empty(&lock->wait_list)))
        atomic_set(&lock->count, 0);

    ww_mutex_set_context_slowpath(ww, ctx);

err:
    list_del(&waiter.list);
    safe_sti(eflags);

    return ret;
}
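/*
 * The hard-coded 0x80003010 appears to be the fixed address where
 * KolibriOS maps the current task's kos_taskdata slot -- this port's
 * stand-in for Linux's 'current'. Each loop pass tries to take the
 * lock with atomic_xchg(&lock->count, -1) (an old value of 1 means it
 * was free), otherwise runs the wait/die check and sleeps by setting
 * its own state to 1 and yielding via change_task(). The xchg leaves
 * count at -1, which is why count is reset to 0 once the wait list
 * drains.
 */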
/*
 * Rev 5056 implemented this as an unconditional sleep on the base mutex:
 *
 *    int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 *    {
 *        MutexLock(&lock->base);
 *        ww_mutex_lock_acquired(lock, ctx);
 *        lock->ctx = ctx;
 *        return 0;
 *    }
 *
 * Rev 6082 replaces it with a fastpath/slowpath pair:
 */
int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
    int ret;

    ret = __mutex_fastpath_lock_retval(&lock->base.count);

    if (likely(!ret)) {
        ww_mutex_set_context_fastpath(lock, ctx);
        mutex_set_owner(&lock->base);
    } else
        ret = __ww_mutex_lock_slowpath(lock, ctx);
    return ret;
}
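/*
 * For context, a caller-side sketch of the intended protocol. It is
 * hypothetical: ww_acquire_init() and the ww_class machinery are
 * assumed to exist in this port as they do in Linux, ww_mutex_unlock()
 * is the function whose tail appears earlier in this diff, and the
 * retry loop is simplified (Linux re-acquires the contended lock
 * before retrying).
 */
static int lock_two(struct ww_mutex *a, struct ww_mutex *b,
                    struct ww_acquire_ctx *ctx)
{
    int ret;

retry:
    ret = __ww_mutex_lock(a, ctx);
    if (ret)
        return ret;                     /* -EALREADY or -EDEADLK */

    ret = __ww_mutex_lock(b, ctx);
    if (ret == -EDEADLK) {
        ww_mutex_unlock(a);             /* younger ctx: back off, retry */
        goto retry;
    }
    if (ret)
        ww_mutex_unlock(a);
    return ret;
}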
/*
 * Rev 5056's __ww_mutex_lock_interruptible likewise just called
 * MutexLock(&lock->base). The Rev 6082 version follows the same
 * fastpath/slowpath pattern as __ww_mutex_lock:
 */
int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
    int ret;

    ret = __mutex_fastpath_lock_retval(&lock->base.count);

    if (likely(!ret)) {
        ww_mutex_set_context_fastpath(lock, ctx);