#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions for UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>

/*
 * Must define these before including other files; inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)		\
	".subsection 1\n\t"			\
	extra					\
	".ifndef " LOCK_SECTION_NAME "\n\t"	\
	LOCK_SECTION_NAME ":\n\t"		\
	".endif\n"

#define LOCK_SECTION_END			\
	".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

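/*
 * Usage sketch (editor's illustration, not part of the original header;
 * the asm body is hypothetical, in the style of old x86 lock code):
 * LOCK_SECTION_START/LOCK_SECTION_END bracket the contended slow path
 * of an inline-asm lock so it is emitted out of line in ".subsection 1",
 * keeping the uncontended fast path hot in the instruction cache:
 *
 *	asm volatile("1: decb %0\n\t"
 *		     "jns 3f\n"
 *		     LOCK_SECTION_START("")
 *		     "2: rep; nop\n\t"
 *		     "cmpb $0, %0\n\t"
 *		     "jle 2b\n\t"
 *		     "jmp 1b\n"
 *		     LOCK_SECTION_END
 *		     "3:\n"
 *		     : "+m" (lock->slock) : : "memory");
 */
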
/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

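/*
 * Editor's note with a usage sketch (not part of the original header):
 * raw_spin_lock_init() is a macro so that, under CONFIG_DEBUG_SPINLOCK,
 * every init call site gets its own static lock_class_key and therefore
 * its own lockdep class.  `struct foo' is hypothetical:
 *
 *	struct foo {
 *		raw_spinlock_t lock;
 *	};
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		raw_spin_lock_init(&f->lock);
 *	}
 */
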
#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/*
 * Despite its name, this doesn't have to be a full barrier.
 * It should only guarantee that a STORE before the critical section
 * cannot be reordered with LOADs and STOREs inside this section.
 * spin_lock() is the one-way barrier: those accesses cannot escape
 * out of the region.  So the default implementation simply ensures
 * that a STORE cannot move into the critical section; smp_wmb()
 * should serialize it with another STORE done by spin_lock().
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()	smp_wmb()
#endif
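
/*
 * Usage sketch (editor's illustration, not part of the original header;
 * `obj' and its members are hypothetical).  The barrier keeps the STORE
 * issued before the lock from being reordered with accesses inside the
 * critical section:
 *
 *	WRITE_ONCE(obj->done, 1);
 *	smp_mb__before_spinlock();
 *	raw_spin_lock(&obj->lock);
 *	... LOADs and STOREs here cannot be reordered before the
 *	    STORE to obj->done ...
 *	raw_spin_unlock(&obj->lock);
 */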

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair act as a full barrier.  This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifndef smp_mb__after_unlock_lock
#define smp_mb__after_unlock_lock()	do { } while (0)
#endif

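/*
 * Usage sketch (editor's illustration, not part of the original header;
 * `lock_a' and `lock_b' are hypothetical).  The barrier upgrades an
 * UNLOCK followed by a LOCK to a full barrier even when the two
 * operations act on different lock variables:
 *
 *	raw_spin_unlock(&lock_a);
 *	raw_spin_lock(&lock_b);
 *	smp_mb__after_unlock_lock();
 */
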
/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set.  The
 * various methods are defined as NOPs where they are not required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)
# define raw_spin_lock_bh_nested(lock, subclass) \
	_raw_spin_lock_bh_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does
 * not warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
# define raw_spin_lock_bh_nested(lock, subclass)	_raw_spin_lock_bh(lock)
#endif

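/*
 * Usage sketch (editor's illustration, not part of the original header):
 * taking two locks of the same lockdep class, ordered by address so the
 * order is globally consistent and ABBA deadlocks are avoided.
 * SINGLE_DEPTH_NESTING comes from <linux/lockdep.h>; `l1' and `l2' are
 * hypothetical:
 *
 *	static void lock_both(raw_spinlock_t *l1, raw_spinlock_t *l2)
 *	{
 *		if (l2 < l1)
 *			swap(l1, l2);
 *		raw_spin_lock(l1);
 *		raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
 *	}
 */
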
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

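/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the canonical save/restore pattern for code that may run with IRQs
 * either enabled or already disabled.  `struct foo' is hypothetical:
 *
 *	struct foo {
 *		raw_spinlock_t lock;
 *		int value;
 *	};
 *
 *	static void foo_set(struct foo *f, int v)
 *	{
 *		unsigned long flags;
 *
 *		raw_spin_lock_irqsave(&f->lock, flags);
 *		f->value = v;
 *		raw_spin_unlock_irqrestore(&f->lock, flags);
 *	}
 */
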
#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

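/*
 * Usage sketch (editor's illustration, not part of the original header):
 * an opportunistic fast path that must not spin, for instance because
 * the caller already holds other locks.  `stats_lock' and `counter'
 * are hypothetical:
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&stats_lock, flags)) {
 *		counter++;
 *		raw_spin_unlock_irqrestore(&stats_lock, flags);
 *	}
 */
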
/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_bh_nested(lock, subclass)			\
do {								\
	raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}

static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

static __always_inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

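/*
 * Usage sketch (editor's illustration, not part of the original header):
 * the spin_*() wrappers above are what most kernel code uses; with
 * PREEMPT_RT=n they map straight onto the raw_*() variants.
 * `struct widget' is hypothetical; DEFINE_SPINLOCK, LIST_HEAD and
 * list_add are standard kernel facilities:
 *
 *	static DEFINE_SPINLOCK(widget_lock);
 *	static LIST_HEAD(widget_list);
 *
 *	static void widget_add(struct widget *w)
 *	{
 *		spin_lock(&widget_lock);
 *		list_add(&w->node, &widget_list);
 *		spin_unlock(&widget_lock);
 *	}
 */
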
#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

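/*
 * Usage sketch (editor's illustration, not part of the original header):
 * drop a reference and, only when it hits zero, take the list lock to
 * unlink and free the object; the lock is acquired atomically with the
 * final decrement, so no other CPU can find the object afterwards.
 * `struct obj' and `obj_lock' are hypothetical:
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_lock(&o->refcount, &obj_lock)) {
 *			list_del(&o->node);
 *			spin_unlock(&obj_lock);
 *			kfree(o);
 *		}
 *	}
 */
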
#endif /* __LINUX_SPINLOCK_H */