Subversion Repositories Kolibri OS

Rev 6082
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>

/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))
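
/*
 * Example (a sketch): __lockfunc places out-of-line lock functions into
 * the dedicated .spinlock.text section; in mainline Linux the _raw_*()
 * implementations in linux/spinlock_api_smp.h are declared this way:
 *
 *	void __lockfunc _raw_spin_lock(raw_spinlock_t *lock);
 */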

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
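
/*
 * Example (illustrative sketch, not part of the original header): a raw
 * spinlock can be initialized statically or at run time; "my_lock" is an
 * invented name.
 *
 *	static raw_spinlock_t my_lock = __RAW_SPIN_LOCK_UNLOCKED(my_lock);
 *
 * or, for run-time initialization:
 *
 *	raw_spin_lock_init(&my_lock);
 */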

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/*
 * Despite its name it doesn't necessarily have to be a full barrier.
 * It should only guarantee that a STORE before the critical section
 * can not be reordered with LOADs and STOREs inside this section.
 * spin_lock() is a one-way barrier, so a LOAD can not escape out
 * of the region. So the default implementation simply ensures that
 * a STORE can not move into the critical section; smp_wmb() should
 * serialize it with another STORE done by spin_lock().
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()	smp_wmb()
#endif
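
/*
 * Illustrative use (a sketch; "p" and its fields are invented names):
 * order a plain store against a subsequent lock acquisition:
 *
 *	WRITE_ONCE(p->flag, 1);
 *	smp_mb__before_spinlock();
 *	raw_spin_lock(&p->lock);
 *	...
 *	raw_spin_unlock(&p->lock);
 *
 * Without the barrier, spin_lock()'s one-way (acquire) semantics would
 * allow the STORE to p->flag to move into the critical section.
 */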

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif
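
/*
 * __acquire()/__release() and the __acquires()/__releases() attributes are
 * sparse annotations (see linux/compiler.h): they compile away to nothing
 * and exist only so static analysis can check that every lock acquisition
 * is balanced by a release.
 */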

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)
# define raw_spin_lock_bh_nested(lock, subclass) \
	_raw_spin_lock_bh_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
# define raw_spin_lock_bh_nested(lock, subclass)	_raw_spin_lock_bh(lock)
#endif
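
/*
 * Example (sketch, invented names): lockdep subclasses let two locks of
 * the same lock class be held at once without a false "recursive lock"
 * report, e.g. when locking a parent and a child object of the same type:
 *
 *	raw_spin_lock(&parent->lock);
 *	raw_spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	raw_spin_unlock(&child->lock);
 *	raw_spin_unlock(&parent->lock);
 *
 * SINGLE_DEPTH_NESTING is defined in linux/lockdep.h.
 */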

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
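
/*
 * Example (illustrative sketch): conditionally enter a critical section
 * with local interrupts disabled, backing off instead of spinning;
 * "my_lock" is an invented name.
 *
 *	unsigned long flags;
 *
 *	if (raw_spin_trylock_irqsave(&my_lock, flags)) {
 *		... critical section ...
 *		raw_spin_unlock_irqrestore(&my_lock, flags);
 *	}
 */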

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}
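
/*
 * Example (illustrative sketch): basic usage of the final spin_*() API;
 * DEFINE_SPINLOCK() comes from linux/spinlock_types.h and "my_lock" is
 * an invented name.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	spin_lock(&my_lock);
 *	... critical section ...
 *	spin_unlock(&my_lock);
 */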

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_bh_nested(lock, subclass)			\
do {								\
	raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)
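
/*
 * Example (sketch): the canonical pattern for data shared with an
 * interrupt handler; "dev->lock" is an invented name.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	... critical section, local IRQs disabled ...
 *	spin_unlock_irqrestore(&dev->lock, flags);
 */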

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}

static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

static __always_inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
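
/*
 * Example (illustrative sketch, invented names): drop a reference and,
 * only if it was the last one, take the list lock and free the object:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */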

#endif /* __LINUX_SPINLOCK_H */