#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */

#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 
#include 


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __attribute__((section(".spinlock.text")))

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key);
# define raw_spin_lock_init(lock)				\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init((lock), #lock, &__key);		\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
#else

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/
#endif

/*
 * Despite its name it doesn't necessarily have to be a full barrier.
 * It should only guarantee that a STORE before the critical section
 * can not be reordered with LOADs and STOREs inside this section.
 * spin_lock() is a one-way barrier, so this LOAD can not escape out
 * of the region. So the default implementation simply ensures that
 * a STORE can not move into the critical section; smp_wmb() should
 * serialize it with another STORE done by spin_lock().
 */
#ifndef smp_mb__before_spinlock
#define smp_mb__before_spinlock()	smp_wmb()
#endif
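
/*
 * Minimal usage sketch (not from the upstream header; the task and
 * runqueue names are hypothetical):
 *
 *	WRITE_ONCE(task->state, TASK_RUNNING);
 *	smp_mb__before_spinlock();
 *	raw_spin_lock(&rq->lock);
 *
 * The barrier keeps the STORE to task->state from being reordered with
 * the LOADs and STOREs inside the critical section opened by
 * raw_spin_lock().
 */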

/**
 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
}

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	return arch_spin_trylock(&(lock)->raw_lock);
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)
# define raw_spin_lock_bh_nested(lock, subclass) \
	_raw_spin_lock_bh_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument so that the compiler does not
 * warn about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
# define raw_spin_lock_bh_nested(lock, subclass)	_raw_spin_lock_bh(lock)
#endif

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)			\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave(lock);			\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})

/**
 * raw_spin_can_lock - would raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#define spin_lock_init(_lock)				\
do {							\
	spinlock_check(_lock);				\
	raw_spin_lock_init(&(_lock)->rlock);		\
} while (0)
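
/*
 * Minimal usage sketch (not from the upstream header; 'struct foo' and
 * its members are made up for the example):
 *
 *	struct foo {
 *		spinlock_t	lock;
 *		int		counter;
 *	};
 *
 *	static void foo_init(struct foo *f)
 *	{
 *		spin_lock_init(&f->lock);
 *		f->counter = 0;
 *	}
 */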

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}
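
/*
 * Minimal usage sketch (not from the upstream header), reusing the
 * hypothetical 'struct foo' from the sketch above:
 *
 *	if (spin_trylock(&f->lock)) {
 *		f->counter++;
 *		spin_unlock(&f->lock);
 *	}
 *
 * If the lock is already held, spin_trylock() returns 0 immediately and
 * the caller must not touch the protected data.
 */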

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_bh_nested(lock, subclass)			\
do {								\
	raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
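
/*
 * Minimal usage sketch (not from the upstream header; 'dev' and its
 * members are made up). This is the usual pattern for data that is also
 * touched from interrupt context:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	dev->pending++;
 *	spin_unlock_irqrestore(&dev->lock, flags);
 */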

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
	raw_spin_unlock_wait(&lock->rlock);
}

static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

static __always_inline int spin_can_lock(spinlock_t *lock)
{
	return raw_spin_can_lock(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
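
/*
 * Minimal usage sketch (not from the upstream header; names are
 * hypothetical): a helper that documents and checks the locking
 * requirement placed on its callers:
 *
 *	static void foo_update_locked(struct foo *f)
 *	{
 *		assert_spin_locked(&f->lock);
 *		f->counter++;
 *	}
 */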

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
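
/*
 * Minimal usage sketch (not from the upstream header; the object and
 * list names are hypothetical): a typical "drop the last reference
 * under a list lock" pattern:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */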

#endif /* __LINUX_SPINLOCK_H */