/drivers/include/asm/preempt.h |
---|
0,0 → 1,109 |
#ifndef __ASM_PREEMPT_H |
#define __ASM_PREEMPT_H |
#include <asm/rmwcc.h> |
#include <asm/percpu.h> |
//#include <linux/thread_info.h> |
DECLARE_PER_CPU(int, __preempt_count); |
/* |
* We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such |
* that a decrement hitting 0 means we can and should reschedule. |
*/ |
#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED) |
/* |
* We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users |
* that think a non-zero value indicates we cannot preempt. |
*/ |
static __always_inline int preempt_count(void) |
{ |
return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED; |
} |
static __always_inline void preempt_count_set(int pc) |
{ |
raw_cpu_write_4(__preempt_count, pc); |
} |
/* |
* must be macros to avoid header recursion hell |
*/ |
#define init_task_preempt_count(p) do { \ |
task_thread_info(p)->saved_preempt_count = PREEMPT_DISABLED; \ |
} while (0) |
#define init_idle_preempt_count(p, cpu) do { \ |
task_thread_info(p)->saved_preempt_count = PREEMPT_ENABLED; \ |
per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \ |
} while (0) |
/* |
* We fold the NEED_RESCHED bit into the preempt count such that |
* preempt_enable() can decrement and test for needing to reschedule with a |
* single instruction. |
* |
* We invert the actual bit, so that when the decrement hits 0 we know we both |
* need to resched (the bit is cleared) and can resched (no preempt count). |
*/ |
static __always_inline void set_preempt_need_resched(void) |
{ |
raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED); |
} |
static __always_inline void clear_preempt_need_resched(void) |
{ |
raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED); |
} |
static __always_inline bool test_preempt_need_resched(void) |
{ |
return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED); |
} |
/* |
* The various preempt_count add/sub methods |
*/ |
static __always_inline void __preempt_count_add(int val) |
{ |
raw_cpu_add_4(__preempt_count, val); |
} |
static __always_inline void __preempt_count_sub(int val) |
{ |
raw_cpu_add_4(__preempt_count, -val); |
} |
/* |
* Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule, |
* a decrement which hits zero means we have no preempt_count and should |
* reschedule. |
*/ |
static __always_inline bool __preempt_count_dec_and_test(void) |
{ |
GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); |
} |
/* |
* Returns true when we need to resched and can (barring IRQ state). |
*/ |
static __always_inline bool should_resched(void) |
{ |
return unlikely(!raw_cpu_read_4(__preempt_count)); |
} |
#ifdef CONFIG_PREEMPT |
extern asmlinkage void ___preempt_schedule(void); |
# define __preempt_schedule() asm ("call ___preempt_schedule") |
extern asmlinkage void preempt_schedule(void); |
# ifdef CONFIG_CONTEXT_TRACKING |
extern asmlinkage void ___preempt_schedule_context(void); |
# define __preempt_schedule_context() asm ("call ___preempt_schedule_context") |
extern asmlinkage void preempt_schedule_context(void); |
# endif |
#endif |
#endif /* __ASM_PREEMPT_H */ |
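The inverted-bit scheme above is easiest to verify with concrete values. Below is a minimal user-space sketch, not part of the header: the bit constant is copied from <linux/preempt.h>, everything else is illustrative.

#include <stdio.h>

#define PREEMPT_NEED_RESCHED 0x80000000u	/* same bit as <linux/preempt.h> */

int main(void)
{
	/* PREEMPT_ENABLED: zero count, inverted bit set (no resched needed) */
	unsigned int count = PREEMPT_NEED_RESCHED;

	count += 1;				/* preempt_disable() */
	count &= ~PREEMPT_NEED_RESCHED;		/* set_preempt_need_resched() */
	count -= 1;				/* preempt_enable(): the "decl" */

	/* Hitting 0 means: no preempt_count left AND a reschedule is pending */
	printf("count=%#x => %s\n", count,
	       count == 0 ? "reschedule" : "keep running");
	return 0;
}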
/drivers/include/asm/rwsem.h |
---|
0,0 → 1,225 |
/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+ |
* |
* Written by David Howells (dhowells@redhat.com). |
* |
* Derived from asm-x86/semaphore.h |
* |
* |
* The MSW of the count is the negated number of active writers and waiting |
* lockers, and the LSW is the total number of active locks |
* |
* The lock count is initialized to 0 (no active and no waiting lockers). |
* |
* When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an |
* uncontended lock. This can be determined because XADD returns the old value. |
* Readers increment by 1 and see a positive value when uncontended, negative |
* if there are writers (and maybe readers) waiting, in which case the reader |
* goes to sleep. |
* |
* The value of WAITING_BIAS supports up to 32767 waiting processes. This can |
* be extended to 65534 by manually checking the whole MSW rather than relying |
* on the S flag. |
* |
* The value of ACTIVE_BIAS supports up to 65535 active processes. |
* |
* This should be totally fair - if anything is waiting, a process that wants a |
* lock will go to the back of the queue. When the currently active lock is |
* released, if there's a writer at the front of the queue, then that and only |
* that will be woken up; if there's a bunch of consecutive readers at the |
* front, then they'll all be woken up, but no other readers will be. |
*/ |
#ifndef _ASM_X86_RWSEM_H |
#define _ASM_X86_RWSEM_H |
#ifndef _LINUX_RWSEM_H |
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" |
#endif |
#ifdef __KERNEL__ |
#include <asm/asm.h> |
/* |
* The bias values and the counter type limit the number of |
* potential readers/writers to 32767 for 32 bits and 2147483647 |
* for 64 bits. |
*/ |
#ifdef CONFIG_X86_64 |
# define RWSEM_ACTIVE_MASK 0xffffffffL |
#else |
# define RWSEM_ACTIVE_MASK 0x0000ffffL |
#endif |
#define RWSEM_UNLOCKED_VALUE 0x00000000L |
#define RWSEM_ACTIVE_BIAS 0x00000001L |
#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1) |
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
/* |
* lock for reading |
*/ |
static inline void __down_read(struct rw_semaphore *sem) |
{ |
asm volatile("# beginning down_read\n\t" |
LOCK_PREFIX _ASM_INC "(%1)\n\t" |
/* adds 0x00000001 */ |
" jns 1f\n" |
" call call_rwsem_down_read_failed\n" |
"1:\n\t" |
"# ending down_read\n\t" |
: "+m" (sem->count) |
: "a" (sem) |
: "memory", "cc"); |
} |
/* |
* trylock for reading -- returns 1 if successful, 0 if contention |
*/ |
static inline int __down_read_trylock(struct rw_semaphore *sem) |
{ |
long result, tmp; |
asm volatile("# beginning __down_read_trylock\n\t" |
" mov %0,%1\n\t" |
"1:\n\t" |
" mov %1,%2\n\t" |
" add %3,%2\n\t" |
" jle 2f\n\t" |
LOCK_PREFIX " cmpxchg %2,%0\n\t" |
" jnz 1b\n\t" |
"2:\n\t" |
"# ending __down_read_trylock\n\t" |
: "+m" (sem->count), "=&a" (result), "=&r" (tmp) |
: "i" (RWSEM_ACTIVE_READ_BIAS) |
: "memory", "cc"); |
return result >= 0 ? 1 : 0; |
} |
/* |
* lock for writing |
*/ |
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass) |
{ |
long tmp; |
asm volatile("# beginning down_write\n\t" |
LOCK_PREFIX " xadd %1,(%2)\n\t" |
/* adds 0xffff0001, returns the old value */ |
" test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" |
/* was the active mask 0 before? */ |
" jz 1f\n" |
" call call_rwsem_down_write_failed\n" |
"1:\n" |
"# ending down_write" |
: "+m" (sem->count), "=d" (tmp) |
: "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) |
: "memory", "cc"); |
} |
static inline void __down_write(struct rw_semaphore *sem) |
{ |
__down_write_nested(sem, 0); |
} |
/* |
* trylock for writing -- returns 1 if successful, 0 if contention |
*/ |
static inline int __down_write_trylock(struct rw_semaphore *sem) |
{ |
long result, tmp; |
asm volatile("# beginning __down_write_trylock\n\t" |
" mov %0,%1\n\t" |
"1:\n\t" |
" test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" |
/* was the active mask 0 before? */ |
" jnz 2f\n\t" |
" mov %1,%2\n\t" |
" add %3,%2\n\t" |
LOCK_PREFIX " cmpxchg %2,%0\n\t" |
" jnz 1b\n\t" |
"2:\n\t" |
" sete %b1\n\t" |
" movzbl %b1, %k1\n\t" |
"# ending __down_write_trylock\n\t" |
: "+m" (sem->count), "=&a" (result), "=&r" (tmp) |
: "er" (RWSEM_ACTIVE_WRITE_BIAS) |
: "memory", "cc"); |
return result; |
} |
/* |
* unlock after reading |
*/ |
static inline void __up_read(struct rw_semaphore *sem) |
{ |
long tmp; |
asm volatile("# beginning __up_read\n\t" |
LOCK_PREFIX " xadd %1,(%2)\n\t" |
/* subtracts 1, returns the old value */ |
" jns 1f\n\t" |
" call call_rwsem_wake\n" /* expects old value in %edx */ |
"1:\n" |
"# ending __up_read\n" |
: "+m" (sem->count), "=d" (tmp) |
: "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS) |
: "memory", "cc"); |
} |
/* |
* unlock after writing |
*/ |
static inline void __up_write(struct rw_semaphore *sem) |
{ |
long tmp; |
asm volatile("# beginning __up_write\n\t" |
LOCK_PREFIX " xadd %1,(%2)\n\t" |
/* subtracts 0xffff0001, returns the old value */ |
" jns 1f\n\t" |
" call call_rwsem_wake\n" /* expects old value in %edx */ |
"1:\n\t" |
"# ending __up_write\n" |
: "+m" (sem->count), "=d" (tmp) |
: "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS) |
: "memory", "cc"); |
} |
/* |
* downgrade write lock to read lock |
*/ |
static inline void __downgrade_write(struct rw_semaphore *sem) |
{ |
asm volatile("# beginning __downgrade_write\n\t" |
LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" |
/* |
* transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) |
* 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) |
*/ |
" jns 1f\n\t" |
" call call_rwsem_downgrade_wake\n" |
"1:\n\t" |
"# ending __downgrade_write\n" |
: "+m" (sem->count) |
: "a" (sem), "er" (-RWSEM_WAITING_BIAS) |
: "memory", "cc"); |
} |
/* |
* implement atomic add functionality |
*/ |
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem) |
{ |
asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" |
: "+m" (sem->count) |
: "er" (delta)); |
} |
/* |
* implement exchange and add functionality |
*/ |
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) |
{ |
return delta + xadd(&sem->count, delta); |
} |
#endif /* __KERNEL__ */ |
#endif /* _ASM_X86_RWSEM_H */ |
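The bias arithmetic described in the header comment can be checked by hand. A user-space sketch using the 32-bit constants from above (purely illustrative, no kernel code involved):

#include <stdio.h>

#define ACTIVE_BIAS	0x00000001L
#define ACTIVE_MASK	0x0000ffffL
#define WAITING_BIAS	(-ACTIVE_MASK - 1)		/* -0x10000 */
#define WRITE_BIAS	(WAITING_BIAS + ACTIVE_BIAS)	/* -0xffff  */

int main(void)
{
	long count = 0;			/* RWSEM_UNLOCKED_VALUE */

	count += ACTIVE_BIAS;		/* down_read() fast path */
	printf("reader: %#010lx (positive => uncontended)\n",
	       (unsigned long)count & 0xffffffffUL);
	count -= ACTIVE_BIAS;		/* up_read() */

	count += WRITE_BIAS;		/* down_write(): xadd returned 0 */
	printf("writer: %#010lx (old active mask was 0 => lock taken)\n",
	       (unsigned long)count & 0xffffffffUL);
	return 0;
}

The second line prints 0xffff0001, the value the header comment gives for an uncontended write lock.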
/drivers/include/linux/bottom_half.h |
---|
0,0 → 1,35 |
#ifndef _LINUX_BH_H |
#define _LINUX_BH_H |
#include <linux/preempt.h> |
#include <linux/preempt_mask.h> |
#ifdef CONFIG_TRACE_IRQFLAGS |
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); |
#else |
static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) |
{ |
preempt_count_add(cnt); |
barrier(); |
} |
#endif |
static inline void local_bh_disable(void) |
{ |
__local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); |
} |
extern void _local_bh_enable(void); |
extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt); |
static inline void local_bh_enable_ip(unsigned long ip) |
{ |
__local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET); |
} |
static inline void local_bh_enable(void) |
{ |
__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); |
} |
#endif /* _LINUX_BH_H */ |
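A usage sketch for the pair above; the counter, and the claim that a tasklet also updates it, are hypothetical. Process context holds off this CPU's softirqs while touching the shared state:

static unsigned long shared_counter;	/* also updated from a tasklet */

static void bump_from_process_context(void)
{
	local_bh_disable();	/* softirqs are held off on this CPU */
	shared_counter++;	/* cannot race with the tasklet here */
	local_bh_enable();	/* pending softirqs may run on the way out */
}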
/drivers/include/linux/kernel.h |
---|
607,15 → 607,7 |
return dev->driver_data; |
} |
#define preempt_disable() do { } while (0) |
#define preempt_enable_no_resched() do { } while (0) |
#define preempt_enable() do { } while (0) |
#define preempt_check_resched() do { } while (0) |
#define preempt_disable_notrace() do { } while (0) |
#define preempt_enable_no_resched_notrace() do { } while (0) |
#define preempt_enable_notrace() do { } while (0) |
#define in_dbg_master() (0) |
#define HZ 100 |
738,26 → 730,8 |
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) |
//#define RCU_INIT_POINTER(p, v) \ |
// do { \ |
// p = (typeof(*v) __force __rcu *)(v); \ |
// } while (0) |
//#define rcu_dereference_raw(p) ({ \ |
// typeof(p) _________p1 = ACCESS_ONCE(p); \ |
// (_________p1); \ |
// }) |
//#define rcu_assign_pointer(p, v) \ |
// ({ \ |
// if (!__builtin_constant_p(v) || \ |
// ((v) != NULL)) \ |
// (p) = (v); \ |
// }) |
#define cpufreq_quick_get_max(x) GetCpuFreq() |
extern unsigned int tsc_khz; |
/drivers/include/linux/mutex.h |
---|
12,9 → 12,10 |
#include <asm/current.h> |
#include <linux/list.h> |
#include <asm/atomic.h> |
#include <linux/spinlock_types.h> |
#include <linux/linkage.h> |
#include <linux/lockdep.h> |
#include <linux/atomic.h> |
#include <asm/processor.h> |
/* |
/drivers/include/linux/preempt.h |
---|
0,0 → 1,195 |
#ifndef __LINUX_PREEMPT_H |
#define __LINUX_PREEMPT_H |
/* |
* include/linux/preempt.h - macros for accessing and manipulating |
* preempt_count (used for kernel preemption, interrupt count, etc.) |
*/ |
#include <linux/linkage.h> |
#include <linux/list.h> |
/* |
* We use the MSB mostly because it's available; see <linux/preempt_mask.h> for |
* the other bits -- can't include that header due to inclusion hell. |
*/ |
#define PREEMPT_NEED_RESCHED 0x80000000 |
#include <asm/preempt.h> |
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) |
extern void preempt_count_add(int val); |
extern void preempt_count_sub(int val); |
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); }) |
#else |
#define preempt_count_add(val) __preempt_count_add(val) |
#define preempt_count_sub(val) __preempt_count_sub(val) |
#define preempt_count_dec_and_test() __preempt_count_dec_and_test() |
#endif |
#define __preempt_count_inc() __preempt_count_add(1) |
#define __preempt_count_dec() __preempt_count_sub(1) |
#define preempt_count_inc() preempt_count_add(1) |
#define preempt_count_dec() preempt_count_sub(1) |
#ifdef CONFIG_PREEMPT_COUNT |
#define preempt_disable() \ |
do { \ |
preempt_count_inc(); \ |
barrier(); \ |
} while (0) |
#define sched_preempt_enable_no_resched() \ |
do { \ |
barrier(); \ |
preempt_count_dec(); \ |
} while (0) |
#define preempt_enable_no_resched() sched_preempt_enable_no_resched() |
#ifdef CONFIG_PREEMPT |
#define preempt_enable() \ |
do { \ |
barrier(); \ |
if (unlikely(preempt_count_dec_and_test())) \ |
__preempt_schedule(); \ |
} while (0) |
#define preempt_check_resched() \ |
do { \ |
if (should_resched()) \ |
__preempt_schedule(); \ |
} while (0) |
#else |
#define preempt_enable() \ |
do { \ |
barrier(); \ |
preempt_count_dec(); \ |
} while (0) |
#define preempt_check_resched() do { } while (0) |
#endif |
#define preempt_disable_notrace() \ |
do { \ |
__preempt_count_inc(); \ |
barrier(); \ |
} while (0) |
#define preempt_enable_no_resched_notrace() \ |
do { \ |
barrier(); \ |
__preempt_count_dec(); \ |
} while (0) |
#ifdef CONFIG_PREEMPT |
#ifndef CONFIG_CONTEXT_TRACKING |
#define __preempt_schedule_context() __preempt_schedule() |
#endif |
#define preempt_enable_notrace() \ |
do { \ |
barrier(); \ |
if (unlikely(__preempt_count_dec_and_test())) \ |
__preempt_schedule_context(); \ |
} while (0) |
#else |
#define preempt_enable_notrace() \ |
do { \ |
barrier(); \ |
__preempt_count_dec(); \ |
} while (0) |
#endif |
#else /* !CONFIG_PREEMPT_COUNT */ |
/* |
* Even if we don't have any preemption, we need preempt disable/enable |
* to be barriers, so that we don't have things like get_user/put_user |
* that can cause faults and scheduling to migrate into our preempt-protected |
* region. |
*/ |
#define preempt_disable() barrier() |
#define sched_preempt_enable_no_resched() barrier() |
#define preempt_enable_no_resched() barrier() |
#define preempt_enable() barrier() |
#define preempt_check_resched() do { } while (0) |
#define preempt_disable_notrace() barrier() |
#define preempt_enable_no_resched_notrace() barrier() |
#define preempt_enable_notrace() barrier() |
#endif /* CONFIG_PREEMPT_COUNT */ |
#ifdef MODULE |
/* |
* Modules have no business playing preemption tricks. |
*/ |
#undef sched_preempt_enable_no_resched |
#undef preempt_enable_no_resched |
#undef preempt_enable_no_resched_notrace |
#undef preempt_check_resched |
#endif |
#define preempt_set_need_resched() \ |
do { \ |
set_preempt_need_resched(); \ |
} while (0) |
#define preempt_fold_need_resched() \ |
do { \ |
if (tif_need_resched()) \ |
set_preempt_need_resched(); \ |
} while (0) |
#ifdef CONFIG_PREEMPT_NOTIFIERS |
struct preempt_notifier; |
/** |
* preempt_ops - notifiers called when a task is preempted and rescheduled |
* @sched_in: we're about to be rescheduled: |
* notifier: struct preempt_notifier for the task being scheduled |
* cpu: cpu we're scheduled on |
* @sched_out: we've just been preempted |
* notifier: struct preempt_notifier for the task being preempted |
* next: the task that's kicking us out |
* |
* Please note that sched_in and out are called under different |
* contexts. sched_out is called with rq lock held and irq disabled |
* while sched_in is called without rq lock and irq enabled. This |
* difference is intentional and depended upon by its users. |
*/ |
struct preempt_ops { |
void (*sched_in)(struct preempt_notifier *notifier, int cpu); |
void (*sched_out)(struct preempt_notifier *notifier, |
struct task_struct *next); |
}; |
/** |
* preempt_notifier - key for installing preemption notifiers |
* @link: internal use |
* @ops: defines the notifier functions to be called |
* |
* Usually used in conjunction with container_of(). |
*/ |
struct preempt_notifier { |
struct hlist_node link; |
struct preempt_ops *ops; |
}; |
void preempt_notifier_register(struct preempt_notifier *notifier); |
void preempt_notifier_unregister(struct preempt_notifier *notifier); |
static inline void preempt_notifier_init(struct preempt_notifier *notifier, |
struct preempt_ops *ops) |
{ |
INIT_HLIST_NODE(¬ifier->link); |
notifier->ops = ops; |
} |
#endif |
#endif /* __LINUX_PREEMPT_H */ |
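A minimal sketch of the canonical pairing (the per-CPU counter is hypothetical): preemption must be off so the task cannot migrate CPUs between selecting the per-CPU slot and updating it.

static DEFINE_PER_CPU(unsigned int, my_hits);	/* hypothetical counter */

static void count_hit(void)
{
	preempt_disable();		/* pin the task to this CPU */
	__this_cpu_inc(my_hits);	/* no migration can happen here */
	preempt_enable();		/* may call __preempt_schedule() */
}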
/drivers/include/linux/preempt_mask.h |
---|
0,0 → 1,117 |
#ifndef LINUX_PREEMPT_MASK_H |
#define LINUX_PREEMPT_MASK_H |
#include <linux/preempt.h> |
/* |
* We put the hardirq and softirq counter into the preemption |
* counter. The bitmask has the following meaning: |
* |
* - bits 0-7 are the preemption count (max preemption depth: 256) |
* - bits 8-15 are the softirq count (max # of softirqs: 256) |
* |
* The hardirq count could in theory be the same as the number of |
* interrupts in the system, but we run all interrupt handlers with |
* interrupts disabled, so we cannot have nesting interrupts. Though |
* there are a few palaeontologic drivers which reenable interrupts in |
* the handler, so we need more than one bit here. |
* |
* PREEMPT_MASK: 0x000000ff |
* SOFTIRQ_MASK: 0x0000ff00 |
* HARDIRQ_MASK: 0x000f0000 |
* NMI_MASK: 0x00100000 |
* PREEMPT_ACTIVE: 0x00200000 |
*/ |
#define PREEMPT_BITS 8 |
#define SOFTIRQ_BITS 8 |
#define HARDIRQ_BITS 4 |
#define NMI_BITS 1 |
#define PREEMPT_SHIFT 0 |
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) |
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) |
#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) |
#define __IRQ_MASK(x) ((1UL << (x))-1) |
#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) |
#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) |
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) |
#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT) |
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) |
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) |
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) |
#define NMI_OFFSET (1UL << NMI_SHIFT) |
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) |
#define PREEMPT_ACTIVE_BITS 1 |
#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) |
#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) |
#define hardirq_count() (preempt_count() & HARDIRQ_MASK) |
#define softirq_count() (preempt_count() & SOFTIRQ_MASK) |
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ |
| NMI_MASK)) |
/* |
* Are we doing bottom half or hardware interrupt processing? |
* Are we in a softirq context? Interrupt context? |
* in_softirq - Are we currently processing softirq or have bh disabled? |
* in_serving_softirq - Are we currently processing softirq? |
*/ |
#define in_irq() (hardirq_count()) |
#define in_softirq() (softirq_count()) |
#define in_interrupt() (irq_count()) |
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) |
/* |
* Are we in NMI context? |
*/ |
#define in_nmi() (preempt_count() & NMI_MASK) |
#if defined(CONFIG_PREEMPT_COUNT) |
# define PREEMPT_CHECK_OFFSET 1 |
#else |
# define PREEMPT_CHECK_OFFSET 0 |
#endif |
/* |
* The preempt_count offset needed for things like: |
* |
* spin_lock_bh() |
* |
* Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and |
* softirqs, such that unlock sequences of: |
* |
* spin_unlock(); |
* local_bh_enable(); |
* |
* Work as expected. |
*/ |
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET) |
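/*
 * Worked arithmetic for the line above (values from this header only):
 * SOFTIRQ_DISABLE_OFFSET = 2 * 0x100 = 0x200 and PREEMPT_CHECK_OFFSET = 1
 * under CONFIG_PREEMPT_COUNT, so SOFTIRQ_LOCK_OFFSET = 0x201: one
 * softirq-disable step plus one preemption-disable step, which is exactly
 * what the spin_lock_bh()/spin_unlock()/local_bh_enable() sequence needs
 * to balance.
 */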
/* |
* Are we running in atomic context? WARNING: this macro cannot |
* always detect atomic context; in particular, it cannot know about |
* held spinlocks in non-preemptible kernels. Thus it should not be |
* used in the general case to determine whether sleeping is possible. |
* Do not use in_atomic() in driver code. |
*/ |
#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0) |
/* |
* Check whether we were atomic before we did preempt_disable(): |
* (used by the scheduler, *after* releasing the kernel lock) |
*/ |
#define in_atomic_preempt_off() \ |
((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET) |
#ifdef CONFIG_PREEMPT_COUNT |
# define preemptible() (preempt_count() == 0 && !irqs_disabled()) |
#else |
# define preemptible() 0 |
#endif |
#endif /* LINUX_PREEMPT_MASK_H */ |
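A decoding sketch for the bit layout documented above; the masks are copied from this header and the sample value is invented:

#include <stdio.h>

#define PREEMPT_MASK 0x000000ffUL
#define SOFTIRQ_MASK 0x0000ff00UL
#define HARDIRQ_MASK 0x000f0000UL

int main(void)
{
	unsigned long pc = 0x00010201;	/* hypothetical preempt_count */

	printf("preempt depth: %lu\n", pc & PREEMPT_MASK);		/* 1 */
	printf("softirq count: %lu\n", (pc & SOFTIRQ_MASK) >> 8);	/* 2 */
	printf("hardirq count: %lu\n", (pc & HARDIRQ_MASK) >> 16);	/* 1 */
	printf("in_interrupt : %s\n",
	       (pc & (HARDIRQ_MASK | SOFTIRQ_MASK)) ? "yes" : "no");
	return 0;
}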
/drivers/include/linux/rwlock_types.h |
---|
0,0 → 1,48 |
#ifndef __LINUX_RWLOCK_TYPES_H |
#define __LINUX_RWLOCK_TYPES_H |
/* |
* include/linux/rwlock_types.h - generic rwlock type definitions |
* and initializers |
* |
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar |
* Released under the General Public License (GPL). |
*/ |
typedef struct { |
arch_rwlock_t raw_lock; |
#ifdef CONFIG_GENERIC_LOCKBREAK |
unsigned int break_lock; |
#endif |
#ifdef CONFIG_DEBUG_SPINLOCK |
unsigned int magic, owner_cpu; |
void *owner; |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
struct lockdep_map dep_map; |
#endif |
} rwlock_t; |
#define RWLOCK_MAGIC 0xdeaf1eed |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } |
#else |
# define RW_DEP_MAP_INIT(lockname) |
#endif |
#ifdef CONFIG_DEBUG_SPINLOCK |
#define __RW_LOCK_UNLOCKED(lockname) \ |
(rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ |
.magic = RWLOCK_MAGIC, \ |
.owner = SPINLOCK_OWNER_INIT, \ |
.owner_cpu = -1, \ |
RW_DEP_MAP_INIT(lockname) } |
#else |
#define __RW_LOCK_UNLOCKED(lockname) \ |
(rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ |
RW_DEP_MAP_INIT(lockname) } |
#endif |
#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) |
#endif /* __LINUX_RWLOCK_TYPES_H */ |
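A usage sketch for the type defined above (table and function names are hypothetical; the lock/unlock helpers come from <linux/rwlock.h>):

static DEFINE_RWLOCK(tbl_lock);
static int tbl[16];

static int tbl_lookup(int i)
{
	int v;

	read_lock(&tbl_lock);	/* readers may run concurrently */
	v = tbl[i];
	read_unlock(&tbl_lock);
	return v;
}

static void tbl_store(int i, int v)
{
	write_lock(&tbl_lock);	/* excludes readers and other writers */
	tbl[i] = v;
	write_unlock(&tbl_lock);
}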
/drivers/include/linux/rwsem.h |
---|
0,0 → 1,180 |
/* rwsem.h: R/W semaphores, public interface |
* |
* Written by David Howells (dhowells@redhat.com). |
* Derived from asm-i386/semaphore.h |
*/ |
#ifndef _LINUX_RWSEM_H |
#define _LINUX_RWSEM_H |
#include <linux/linkage.h> |
#include <linux/types.h> |
#include <linux/kernel.h> |
#include <linux/list.h> |
#include <linux/spinlock.h> |
#include <linux/atomic.h> |
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER |
#include <linux/osq_lock.h> |
#endif |
struct rw_semaphore; |
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK |
#include <linux/rwsem-spinlock.h> /* use a generic implementation */ |
#else |
/* All arch specific implementations share the same struct */ |
struct rw_semaphore { |
long count; |
struct list_head wait_list; |
raw_spinlock_t wait_lock; |
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER |
struct optimistic_spin_queue osq; /* spinner MCS lock */ |
/* |
* Write owner. Used as a speculative check to see |
* if the owner is running on the cpu. |
*/ |
struct task_struct *owner; |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
struct lockdep_map dep_map; |
#endif |
}; |
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); |
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); |
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *); |
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); |
/* Include the arch specific part */ |
#include <asm/rwsem.h> |
/* In all implementations count != 0 means locked */ |
static inline int rwsem_is_locked(struct rw_semaphore *sem) |
{ |
return sem->count != 0; |
} |
#endif |
/* Common initializer macros and functions */ |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } |
#else |
# define __RWSEM_DEP_MAP_INIT(lockname) |
#endif |
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER |
#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL |
#else |
#define __RWSEM_OPT_INIT(lockname) |
#endif |
#define __RWSEM_INITIALIZER(name) \ |
{ .count = RWSEM_UNLOCKED_VALUE, \ |
.wait_list = LIST_HEAD_INIT((name).wait_list), \ |
.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \ |
__RWSEM_OPT_INIT(name) \ |
__RWSEM_DEP_MAP_INIT(name) } |
#define DECLARE_RWSEM(name) \ |
struct rw_semaphore name = __RWSEM_INITIALIZER(name) |
extern void __init_rwsem(struct rw_semaphore *sem, const char *name, |
struct lock_class_key *key); |
#define init_rwsem(sem) \ |
do { \ |
static struct lock_class_key __key; \ |
\ |
__init_rwsem((sem), #sem, &__key); \ |
} while (0) |
/* |
* This is the same regardless of which rwsem implementation is being used. |
* It is just a heuristic, meant to be called by somebody already holding the |
* rwsem, to see if a waiter of an incompatible type wants access to the |
* lock. |
*/ |
static inline int rwsem_is_contended(struct rw_semaphore *sem) |
{ |
return !list_empty(&sem->wait_list); |
} |
/* |
* lock for reading |
*/ |
extern void down_read(struct rw_semaphore *sem); |
/* |
* trylock for reading -- returns 1 if successful, 0 if contention |
*/ |
extern int down_read_trylock(struct rw_semaphore *sem); |
/* |
* lock for writing |
*/ |
extern void down_write(struct rw_semaphore *sem); |
/* |
* trylock for writing -- returns 1 if successful, 0 if contention |
*/ |
extern int down_write_trylock(struct rw_semaphore *sem); |
/* |
* release a read lock |
*/ |
extern void up_read(struct rw_semaphore *sem); |
/* |
* release a write lock |
*/ |
extern void up_write(struct rw_semaphore *sem); |
/* |
* downgrade write lock to read lock |
*/ |
extern void downgrade_write(struct rw_semaphore *sem); |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
/* |
* nested locking. NOTE: rwsems are not allowed to recurse |
* (which occurs if the same task tries to acquire the same |
* lock instance multiple times), but multiple locks of the |
* same lock class might be taken, if the order of the locks |
* is always the same. This ordering rule can be expressed |
* to lockdep via the _nested() APIs, by enumerating the |
* subclasses that are used. (If the nesting relationship is |
* static then another method for expressing nested locking is |
* the explicit definition of lock class keys and the use of |
* lockdep_set_class() at lock initialization time. |
* See Documentation/locking/lockdep-design.txt for more details.) |
*/ |
extern void down_read_nested(struct rw_semaphore *sem, int subclass); |
extern void down_write_nested(struct rw_semaphore *sem, int subclass); |
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock); |
# define down_write_nest_lock(sem, nest_lock) \ |
do { \ |
typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ |
_down_write_nest_lock(sem, &(nest_lock)->dep_map); \ |
} while (0) |
/* |
* Take/release a lock when not the owner will release it. |
* |
* [ This API should be avoided as much as possible - the |
* proper abstraction for this case is completions. ] |
*/ |
extern void down_read_non_owner(struct rw_semaphore *sem); |
extern void up_read_non_owner(struct rw_semaphore *sem); |
#else |
# define down_read_nested(sem, subclass) down_read(sem) |
# define down_write_nest_lock(sem, nest_lock) down_write(sem) |
# define down_write_nested(sem, subclass) down_write(sem) |
# define down_read_non_owner(sem) down_read(sem) |
# define up_read_non_owner(sem) up_read(sem) |
#endif |
#endif /* _LINUX_RWSEM_H */ |
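A sketch of the public API above in use (all names hypothetical): any number of readers may hold the semaphore at once, a writer holds it exclusively, and both may sleep while waiting:

static DECLARE_RWSEM(cfg_sem);
static int cfg_value;

static int cfg_read(void)
{
	int v;

	down_read(&cfg_sem);	/* shared: other readers may enter */
	v = cfg_value;
	up_read(&cfg_sem);
	return v;
}

static void cfg_write(int v)
{
	down_write(&cfg_sem);	/* exclusive: everyone else waits */
	cfg_value = v;
	up_write(&cfg_sem);
}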
/drivers/include/linux/spinlock.h |
---|
47,13 → 47,13 |
*/ |
#include <linux/typecheck.h> |
//#include <linux/preempt.h> |
#include <linux/preempt.h> |
#include <linux/linkage.h> |
#include <linux/compiler.h> |
//#include <linux/thread_info.h> |
#include <linux/irqflags.h> |
#include <linux/kernel.h> |
#include <linux/stringify.h> |
//#include <linux/bottom_half.h> |
#include <linux/bottom_half.h> |
#include <asm/barrier.h> |
89,234 → 89,322 |
#endif |
#ifdef CONFIG_DEBUG_SPINLOCK |
extern void __spin_lock_init(spinlock_t *lock, const char *name, |
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, |
struct lock_class_key *key); |
# define spin_lock_init(lock) \ |
# define raw_spin_lock_init(lock) \ |
do { \ |
static struct lock_class_key __key; \ |
\ |
__spin_lock_init((lock), #lock, &__key); \ |
__raw_spin_lock_init((lock), #lock, &__key); \ |
} while (0) |
#else |
# define spin_lock_init(lock) \ |
do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) |
# define raw_spin_lock_init(lock) \ |
do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) |
#endif |
#ifdef CONFIG_DEBUG_SPINLOCK |
extern void __rwlock_init(rwlock_t *lock, const char *name, |
struct lock_class_key *key); |
# define rwlock_init(lock) \ |
do { \ |
static struct lock_class_key __key; \ |
\ |
__rwlock_init((lock), #lock, &__key); \ |
} while (0) |
#else |
# define rwlock_init(lock) \ |
do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) |
#endif |
#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) |
#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) |
#ifdef CONFIG_GENERIC_LOCKBREAK |
#define spin_is_contended(lock) ((lock)->break_lock) |
#define raw_spin_is_contended(lock) ((lock)->break_lock) |
#else |
#ifdef __raw_spin_is_contended |
#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) |
#ifdef arch_spin_is_contended |
#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) |
#else |
#define spin_is_contended(lock) (((void)(lock), 0)) |
#endif /*__raw_spin_is_contended*/ |
#define raw_spin_is_contended(lock) (((void)(lock), 0)) |
#endif /*arch_spin_is_contended*/ |
#endif |
/* The lock does not imply full memory barrier. */ |
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK |
static inline void smp_mb__after_lock(void) { smp_mb(); } |
/* |
* Despite its name it doesn't necessarily have to be a full barrier. |
* It should only guarantee that a STORE before the critical section |
* can not be reordered with a LOAD inside this section. |
* spin_lock() is the one-way barrier, this LOAD can not escape out |
* of the region. So the default implementation simply ensures that |
* a STORE can not move into the critical section, smp_wmb() should |
* serialize it with another STORE done by spin_lock(). |
*/ |
#ifndef smp_mb__before_spinlock |
#define smp_mb__before_spinlock() smp_wmb() |
#endif |
/* |
* Place this after a lock-acquisition primitive to guarantee that |
* an UNLOCK+LOCK pair act as a full barrier. This guarantee applies |
* if the UNLOCK and LOCK are executed by the same CPU or if the |
* UNLOCK and LOCK operate on the same lock variable. |
*/ |
#ifndef smp_mb__after_unlock_lock |
#define smp_mb__after_unlock_lock() do { } while (0) |
#endif |
/** |
* spin_unlock_wait - wait until the spinlock gets unlocked |
* raw_spin_unlock_wait - wait until the spinlock gets unlocked |
* @lock: the spinlock in question. |
*/ |
#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) |
#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) |
#ifdef CONFIG_DEBUG_SPINLOCK |
extern void _raw_spin_lock(spinlock_t *lock); |
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) |
extern int _raw_spin_trylock(spinlock_t *lock); |
extern void _raw_spin_unlock(spinlock_t *lock); |
extern void _raw_read_lock(rwlock_t *lock); |
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) |
extern int _raw_read_trylock(rwlock_t *lock); |
extern void _raw_read_unlock(rwlock_t *lock); |
extern void _raw_write_lock(rwlock_t *lock); |
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) |
extern int _raw_write_trylock(rwlock_t *lock); |
extern void _raw_write_unlock(rwlock_t *lock); |
extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); |
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock) |
extern int do_raw_spin_trylock(raw_spinlock_t *lock); |
extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); |
#else |
# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) |
# define _raw_spin_lock_flags(lock, flags) \ |
__raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) |
# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) |
# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) |
# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) |
# define _raw_read_lock_flags(lock, flags) \ |
__raw_read_lock_flags(&(lock)->raw_lock, *(flags)) |
# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) |
# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) |
# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) |
# define _raw_write_lock_flags(lock, flags) \ |
__raw_write_lock_flags(&(lock)->raw_lock, *(flags)) |
# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) |
# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) |
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock) |
{ |
__acquire(lock); |
arch_spin_lock(&lock->raw_lock); |
} |
static inline void |
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock) |
{ |
__acquire(lock); |
arch_spin_lock_flags(&lock->raw_lock, *flags); |
} |
static inline int do_raw_spin_trylock(raw_spinlock_t *lock) |
{ |
return arch_spin_trylock(&(lock)->raw_lock); |
} |
static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) |
{ |
arch_spin_unlock(&lock->raw_lock); |
__release(lock); |
} |
#endif |
#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) |
#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) |
/* |
* Define the various spin_lock and rw_lock methods. Note we define these |
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various |
* methods are defined as nops in the case they are not required. |
* Define the various spin_lock methods. Note we define these |
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The |
* various methods are defined as nops in the case they are not |
* required. |
*/ |
#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) |
#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock)) |
#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock)) |
#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) |
#define spin_lock(lock) _spin_lock(lock) |
#define raw_spin_lock(lock) _raw_spin_lock(lock) |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) |
# define spin_lock_nest_lock(lock, nest_lock) \ |
# define raw_spin_lock_nested(lock, subclass) \ |
_raw_spin_lock_nested(lock, subclass) |
# define raw_spin_lock_nest_lock(lock, nest_lock) \ |
do { \ |
typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ |
_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ |
_raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ |
} while (0) |
#else |
# define spin_lock_nested(lock, subclass) _spin_lock(lock) |
# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) |
/* |
* Always evaluate the 'subclass' argument to avoid that the compiler |
* warns about set-but-not-used variables when building with |
* CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. |
*/ |
# define raw_spin_lock_nested(lock, subclass) \ |
_raw_spin_lock(((void)(subclass), (lock))) |
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) |
#endif |
#define write_lock(lock) _write_lock(lock) |
#define read_lock(lock) _read_lock(lock) |
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
#define spin_lock_irqsave(lock, flags) \ |
#define raw_spin_lock_irqsave(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = _spin_lock_irqsave(lock); \ |
flags = _raw_spin_lock_irqsave(lock); \ |
} while (0) |
#define read_lock_irqsave(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = _read_lock_irqsave(lock); \ |
} while (0) |
#define write_lock_irqsave(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = _write_lock_irqsave(lock); \ |
} while (0) |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
#define spin_lock_irqsave_nested(lock, flags, subclass) \ |
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = _spin_lock_irqsave_nested(lock, subclass); \ |
flags = _raw_spin_lock_irqsave_nested(lock, subclass); \ |
} while (0) |
#else |
#define spin_lock_irqsave_nested(lock, flags, subclass) \ |
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = _spin_lock_irqsave(lock); \ |
flags = _raw_spin_lock_irqsave(lock); \ |
} while (0) |
#endif |
#else |
#define spin_lock_irqsave(lock, flags) \ |
#define raw_spin_lock_irqsave(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
_spin_lock_irqsave(lock, flags); \ |
_raw_spin_lock_irqsave(lock, flags); \ |
} while (0) |
#define read_lock_irqsave(lock, flags) \ |
#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ |
raw_spin_lock_irqsave(lock, flags) |
#endif |
#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) |
#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) |
#define raw_spin_unlock(lock) _raw_spin_unlock(lock) |
#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) |
#define raw_spin_unlock_irqrestore(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
_read_lock_irqsave(lock, flags); \ |
_raw_spin_unlock_irqrestore(lock, flags); \ |
} while (0) |
#define write_lock_irqsave(lock, flags) \ |
#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) |
#define raw_spin_trylock_bh(lock) \ |
__cond_lock(lock, _raw_spin_trylock_bh(lock)) |
#define raw_spin_trylock_irq(lock) \ |
({ \ |
local_irq_disable(); \ |
raw_spin_trylock(lock) ? \ |
1 : ({ local_irq_enable(); 0; }); \ |
}) |
#define raw_spin_trylock_irqsave(lock, flags) \ |
({ \ |
local_irq_save(flags); \ |
raw_spin_trylock(lock) ? \ |
1 : ({ local_irq_restore(flags); 0; }); \ |
}) |
/** |
* raw_spin_can_lock - would raw_spin_trylock() succeed? |
* @lock: the spinlock in question. |
*/ |
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) |
/* Include rwlock functions */ |
#include <linux/rwlock.h> |
/* |
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations: |
*/ |
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
# include <linux/spinlock_api_smp.h> |
#else |
# include <linux/spinlock_api_up.h> |
#endif |
/* |
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n |
*/ |
static inline raw_spinlock_t *spinlock_check(spinlock_t *lock) |
{ |
return &lock->rlock; |
} |
#define spin_lock_init(_lock) \ |
do { \ |
typecheck(unsigned long, flags); \ |
_write_lock_irqsave(lock, flags); \ |
spinlock_check(_lock); \ |
raw_spin_lock_init(&(_lock)->rlock); \ |
} while (0) |
#define spin_lock_irqsave_nested(lock, flags, subclass) \ |
spin_lock_irqsave(lock, flags) |
#endif |
static inline void spin_lock(spinlock_t *lock) |
{ |
raw_spin_lock(&lock->rlock); |
} |
#define spin_lock_irq(lock) _spin_lock_irq(lock) |
#define spin_lock_bh(lock) _spin_lock_bh(lock) |
#define read_lock_irq(lock) _read_lock_irq(lock) |
#define read_lock_bh(lock) _read_lock_bh(lock) |
#define write_lock_irq(lock) _write_lock_irq(lock) |
#define write_lock_bh(lock) _write_lock_bh(lock) |
#define spin_unlock(lock) _spin_unlock(lock) |
#define read_unlock(lock) _read_unlock(lock) |
#define write_unlock(lock) _write_unlock(lock) |
#define spin_unlock_irq(lock) _spin_unlock_irq(lock) |
#define read_unlock_irq(lock) _read_unlock_irq(lock) |
#define write_unlock_irq(lock) _write_unlock_irq(lock) |
static inline void spin_lock_bh(spinlock_t *lock) |
{ |
raw_spin_lock_bh(&lock->rlock); |
} |
#define spin_unlock_irqrestore(lock, flags) \ |
static inline int spin_trylock(spinlock_t *lock) |
{ |
return raw_spin_trylock(&lock->rlock); |
} |
#define spin_lock_nested(lock, subclass) \ |
do { \ |
typecheck(unsigned long, flags); \ |
_spin_unlock_irqrestore(lock, flags); \ |
raw_spin_lock_nested(spinlock_check(lock), subclass); \ |
} while (0) |
#define spin_unlock_bh(lock) _spin_unlock_bh(lock) |
#define read_unlock_irqrestore(lock, flags) \ |
#define spin_lock_nest_lock(lock, nest_lock) \ |
do { \ |
typecheck(unsigned long, flags); \ |
_read_unlock_irqrestore(lock, flags); \ |
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ |
} while (0) |
#define read_unlock_bh(lock) _read_unlock_bh(lock) |
#define write_unlock_irqrestore(lock, flags) \ |
static inline void spin_lock_irq(spinlock_t *lock) |
{ |
raw_spin_lock_irq(&lock->rlock); |
} |
#define spin_lock_irqsave(lock, flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
_write_unlock_irqrestore(lock, flags); \ |
raw_spin_lock_irqsave(spinlock_check(lock), flags); \ |
} while (0) |
#define write_unlock_bh(lock) _write_unlock_bh(lock) |
#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) |
#define spin_lock_irqsave_nested(lock, flags, subclass) \ |
do { \ |
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ |
} while (0) |
#define spin_trylock_irq(lock) \ |
({ \ |
local_irq_disable(); \ |
spin_trylock(lock) ? \ |
1 : ({ local_irq_enable(); 0; }); \ |
}) |
static inline void spin_unlock(spinlock_t *lock) |
{ |
raw_spin_unlock(&lock->rlock); |
} |
static inline void spin_unlock_bh(spinlock_t *lock) |
{ |
raw_spin_unlock_bh(&lock->rlock); |
} |
static inline void spin_unlock_irq(spinlock_t *lock) |
{ |
raw_spin_unlock_irq(&lock->rlock); |
} |
static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) |
{ |
raw_spin_unlock_irqrestore(&lock->rlock, flags); |
} |
static inline int spin_trylock_bh(spinlock_t *lock) |
{ |
return raw_spin_trylock_bh(&lock->rlock); |
} |
static inline int spin_trylock_irq(spinlock_t *lock) |
{ |
return raw_spin_trylock_irq(&lock->rlock); |
} |
#define spin_trylock_irqsave(lock, flags) \ |
({ \ |
local_irq_save(flags); \ |
spin_trylock(lock) ? \ |
1 : ({ local_irq_restore(flags); 0; }); \ |
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ |
}) |
#define write_trylock_irqsave(lock, flags) \ |
({ \ |
local_irq_save(flags); \ |
write_trylock(lock) ? \ |
1 : ({ local_irq_restore(flags); 0; }); \ |
}) |
static inline void spin_unlock_wait(spinlock_t *lock) |
{ |
raw_spin_unlock_wait(&lock->rlock); |
} |
static inline int spin_is_locked(spinlock_t *lock) |
{ |
return raw_spin_is_locked(&lock->rlock); |
} |
static inline int spin_is_contended(spinlock_t *lock) |
{ |
return raw_spin_is_contended(&lock->rlock); |
} |
static inline int spin_can_lock(spinlock_t *lock) |
{ |
return raw_spin_can_lock(&lock->rlock); |
} |
#define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) |
/* |
* Pull the atomic_t declaration: |
* (asm-mips/atomic.h needs above definitions) |
*/ |
#include <asm/atomic.h> |
#include <linux/atomic.h> |
/** |
* atomic_dec_and_lock - lock on reaching reference count zero |
* @atomic: the atomic counter |
329,25 → 417,4 |
#define atomic_dec_and_lock(atomic, lock) \ |
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) |
/** |
* spin_can_lock - would spin_trylock() succeed? |
* @lock: the spinlock in question. |
*/ |
#define spin_can_lock(lock) (!spin_is_locked(lock)) |
/* |
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations: |
*/ |
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
# include <linux/spinlock_api_smp.h> |
#else |
# include <linux/spinlock_api_up.h> |
#endif |
struct rw_semaphore { |
signed long count; |
spinlock_t wait_lock; |
struct list_head wait_list; |
}; |
#endif /* __LINUX_SPINLOCK_H */ |
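A sketch of the pattern the mappings above implement (lock and queue names are hypothetical); the irqsave variant is safe regardless of whether IRQs were already disabled at the call site:

static DEFINE_SPINLOCK(q_lock);
static LIST_HEAD(q_head);

static void q_add(struct list_head *item)
{
	unsigned long flags;

	spin_lock_irqsave(&q_lock, flags);	/* usable from any context */
	list_add_tail(item, &q_head);
	spin_unlock_irqrestore(&q_lock, flags);
}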
/drivers/include/linux/spinlock_api_up.h |
---|
24,70 → 24,68 |
* flags straight, to suppress compiler warnings of unused lock |
* variables, and to add the proper checker annotations: |
*/ |
#define ___LOCK(lock) \ |
do { __acquire(lock); (void)(lock); } while (0) |
#define __LOCK(lock) \ |
do { preempt_disable(); __acquire(lock); (void)(lock); } while (0) |
do { preempt_disable(); ___LOCK(lock); } while (0) |
#define __LOCK_BH(lock) \ |
do { local_bh_disable(); __LOCK(lock); } while (0) |
do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0) |
#define __LOCK_IRQ(lock) \ |
do { asm volatile ("cli \n"); __LOCK(lock); } while (0) |
do { local_irq_disable(); __LOCK(lock); } while (0) |
#define __LOCK_IRQSAVE(lock, flags) \ |
do { \ |
__asm__ __volatile__ ( \ |
"pushf\n\t" \ |
"popl %0\n\t" \ |
"cli\n" \ |
: "=r" (flags)); \ |
__LOCK(lock); \ |
} while (0) \ |
do { local_irq_save(flags); __LOCK(lock); } while (0) |
#define ___UNLOCK(lock) \ |
do { __release(lock); (void)(lock); } while (0) |
#define __UNLOCK(lock) \ |
do { preempt_enable(); __release(lock); (void)(lock); } while (0) |
do { preempt_enable(); ___UNLOCK(lock); } while (0) |
#define __UNLOCK_BH(lock) \ |
do { preempt_enable_no_resched(); local_bh_enable(); \ |
__release(lock); (void)(lock); } while (0) |
do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \ |
___UNLOCK(lock); } while (0) |
#define __UNLOCK_IRQ(lock) \ |
do { asm volatile ("sti \n"); __UNLOCK(lock); } while (0) |
do { local_irq_enable(); __UNLOCK(lock); } while (0) |
#define __UNLOCK_IRQRESTORE(lock, flags) \ |
do { \ |
if (flags & (1<<9)) \ |
__asm__ __volatile__ ("sti"); \ |
__UNLOCK(lock); \ |
} while (0) |
do { local_irq_restore(flags); __UNLOCK(lock); } while (0) |
#define _spin_lock(lock) __LOCK(lock) |
#define _spin_lock_nested(lock, subclass) __LOCK(lock) |
#define _read_lock(lock) __LOCK(lock) |
#define _write_lock(lock) __LOCK(lock) |
#define _spin_lock_bh(lock) __LOCK_BH(lock) |
#define _read_lock_bh(lock) __LOCK_BH(lock) |
#define _write_lock_bh(lock) __LOCK_BH(lock) |
#define _spin_lock_irq(lock) __LOCK_IRQ(lock) |
#define _read_lock_irq(lock) __LOCK_IRQ(lock) |
#define _write_lock_irq(lock) __LOCK_IRQ(lock) |
#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
#define _spin_trylock(lock) ({ __LOCK(lock); 1; }) |
#define _read_trylock(lock) ({ __LOCK(lock); 1; }) |
#define _write_trylock(lock) ({ __LOCK(lock); 1; }) |
#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) |
#define _spin_unlock(lock) __UNLOCK(lock) |
#define _read_unlock(lock) __UNLOCK(lock) |
#define _write_unlock(lock) __UNLOCK(lock) |
#define _spin_unlock_bh(lock) __UNLOCK_BH(lock) |
#define _write_unlock_bh(lock) __UNLOCK_BH(lock) |
#define _read_unlock_bh(lock) __UNLOCK_BH(lock) |
#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) |
#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) |
#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) |
#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) |
#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) |
#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) |
#define _raw_spin_lock(lock) __LOCK(lock) |
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) |
#define _raw_read_lock(lock) __LOCK(lock) |
#define _raw_write_lock(lock) __LOCK(lock) |
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock) |
#define _raw_read_lock_bh(lock) __LOCK_BH(lock) |
#define _raw_write_lock_bh(lock) __LOCK_BH(lock) |
#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock) |
#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock) |
#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock) |
#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; }) |
#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; }) |
#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; }) |
#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) |
#define _raw_spin_unlock(lock) __UNLOCK(lock) |
#define _raw_read_unlock(lock) __UNLOCK(lock) |
#define _raw_write_unlock(lock) __UNLOCK(lock) |
#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock) |
#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock) |
#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock) |
#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock) |
#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock) |
#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock) |
#define _raw_spin_unlock_irqrestore(lock, flags) \ |
__UNLOCK_IRQRESTORE(lock, flags) |
#define _raw_read_unlock_irqrestore(lock, flags) \ |
__UNLOCK_IRQRESTORE(lock, flags) |
#define _raw_write_unlock_irqrestore(lock, flags) \ |
__UNLOCK_IRQRESTORE(lock, flags) |
#endif /* __LINUX_SPINLOCK_API_UP_H */ |
/drivers/include/linux/spinlock_types.h |
---|
17,8 → 17,8 |
#include <linux/lockdep.h> |
typedef struct spinlock { |
raw_spinlock_t raw_lock; |
typedef struct raw_spinlock { |
arch_spinlock_t raw_lock; |
#ifdef CONFIG_GENERIC_LOCKBREAK |
unsigned int break_lock; |
#endif |
29,26 → 29,10 |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
struct lockdep_map dep_map; |
#endif |
} spinlock_t; |
} raw_spinlock_t; |
#define SPINLOCK_MAGIC 0xdead4ead |
typedef struct { |
raw_rwlock_t raw_lock; |
#ifdef CONFIG_GENERIC_LOCKBREAK |
unsigned int break_lock; |
#endif |
#ifdef CONFIG_DEBUG_SPINLOCK |
unsigned int magic, owner_cpu; |
void *owner; |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
struct lockdep_map dep_map; |
#endif |
} rwlock_t; |
#define RWLOCK_MAGIC 0xdeaf1eed |
#define SPINLOCK_OWNER_INIT ((void *)-1L) |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
57,44 → 41,48 |
# define SPIN_DEP_MAP_INIT(lockname) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } |
#else |
# define RW_DEP_MAP_INIT(lockname) |
#endif |
#ifdef CONFIG_DEBUG_SPINLOCK |
# define __SPIN_LOCK_UNLOCKED(lockname) \ |
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ |
# define SPIN_DEBUG_INIT(lockname) \ |
.magic = SPINLOCK_MAGIC, \ |
.owner = SPINLOCK_OWNER_INIT, \ |
.owner_cpu = -1, \ |
SPIN_DEP_MAP_INIT(lockname) } |
#define __RW_LOCK_UNLOCKED(lockname) \ |
(rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ |
.magic = RWLOCK_MAGIC, \ |
.owner = SPINLOCK_OWNER_INIT, \ |
.owner_cpu = -1, \ |
RW_DEP_MAP_INIT(lockname) } |
.owner = SPINLOCK_OWNER_INIT, |
#else |
# define __SPIN_LOCK_UNLOCKED(lockname) \ |
(spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ |
# define SPIN_DEBUG_INIT(lockname) |
#endif |
#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ |
{ \ |
.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ |
SPIN_DEBUG_INIT(lockname) \ |
SPIN_DEP_MAP_INIT(lockname) } |
#define __RW_LOCK_UNLOCKED(lockname) \ |
(rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ |
RW_DEP_MAP_INIT(lockname) } |
#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ |
(raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) |
#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) |
typedef struct spinlock { |
union { |
struct raw_spinlock rlock; |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) |
struct { |
u8 __padding[LOCK_PADSIZE]; |
struct lockdep_map dep_map; |
}; |
#endif |
}; |
} spinlock_t; |
/* |
* SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and |
* are hence deprecated. |
* Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or |
* __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate. |
*/ |
#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) |
#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) |
#define __SPIN_LOCK_INITIALIZER(lockname) \ |
{ { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } |
#define __SPIN_LOCK_UNLOCKED(lockname) \ |
(spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) |
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) |
#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) |
#include <linux/rwlock_types.h> |
#endif /* __LINUX_SPINLOCK_TYPES_H */ |
/drivers/include/linux/spinlock_types_up.h |
---|
16,22 → 16,22 |
typedef struct { |
volatile unsigned int slock; |
} raw_spinlock_t; |
} arch_spinlock_t; |
#define __RAW_SPIN_LOCK_UNLOCKED { 1 } |
#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } |
#else |
typedef struct { } raw_spinlock_t; |
typedef struct { } arch_spinlock_t; |
#define __RAW_SPIN_LOCK_UNLOCKED { } |
#define __ARCH_SPIN_LOCK_UNLOCKED { } |
#endif |
typedef struct { |
/* no debug version on UP */ |
} raw_rwlock_t; |
} arch_rwlock_t; |
#define __RAW_RW_LOCK_UNLOCKED { } |
#define __ARCH_RW_LOCK_UNLOCKED { } |
#endif /* __LINUX_SPINLOCK_TYPES_UP_H */ |
/drivers/include/linux/spinlock_up.h |
---|
5,6 → 5,8 |
# error "please don't include this file directly" |
#endif |
#include <asm/processor.h> /* for cpu_relax() */ |
/* |
* include/linux/spinlock_up.h - UP-debug version of spinlocks. |
* |
21,31 → 23,35 |
*/ |
#ifdef CONFIG_DEBUG_SPINLOCK |
#define __raw_spin_is_locked(x) ((x)->slock == 0) |
#define arch_spin_is_locked(x) ((x)->slock == 0) |
static inline void __raw_spin_lock(raw_spinlock_t *lock) |
static inline void arch_spin_lock(arch_spinlock_t *lock) |
{ |
lock->slock = 0; |
barrier(); |
} |
static inline void |
__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) |
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) |
{ |
local_irq_save(flags); |
lock->slock = 0; |
barrier(); |
} |
static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
static inline int arch_spin_trylock(arch_spinlock_t *lock) |
{ |
char oldval = lock->slock; |
lock->slock = 0; |
barrier(); |
return oldval > 0; |
} |
static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
static inline void arch_spin_unlock(arch_spinlock_t *lock) |
{ |
barrier(); |
lock->slock = 1; |
} |
52,28 → 58,28 |
/* |
* Read-write spinlocks. No debug version. |
*/ |
#define __raw_read_lock(lock) do { (void)(lock); } while (0) |
#define __raw_write_lock(lock) do { (void)(lock); } while (0) |
#define __raw_read_trylock(lock) ({ (void)(lock); 1; }) |
#define __raw_write_trylock(lock) ({ (void)(lock); 1; }) |
#define __raw_read_unlock(lock) do { (void)(lock); } while (0) |
#define __raw_write_unlock(lock) do { (void)(lock); } while (0) |
#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0) |
#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0) |
#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; }) |
#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; }) |
#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0) |
#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0) |
#else /* DEBUG_SPINLOCK */ |
#define __raw_spin_is_locked(lock) ((void)(lock), 0) |
/* for sched.c and kernel_lock.c: */ |
# define __raw_spin_lock(lock) do { (void)(lock); } while (0) |
# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) |
# define __raw_spin_unlock(lock) do { (void)(lock); } while (0) |
# define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) |
#define arch_spin_is_locked(lock) ((void)(lock), 0) |
/* for sched/core.c and kernel_lock.c: */ |
# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) |
# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) |
# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0) |
# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; }) |
#endif /* DEBUG_SPINLOCK */ |
#define __raw_spin_is_contended(lock) (((void)(lock), 0)) |
#define arch_spin_is_contended(lock) (((void)(lock), 0)) |
#define __raw_read_can_lock(lock) (((void)(lock), 1)) |
#define __raw_write_can_lock(lock) (((void)(lock), 1)) |
#define arch_read_can_lock(lock) (((void)(lock), 1)) |
#define arch_write_can_lock(lock) (((void)(lock), 1)) |
#define __raw_spin_unlock_wait(lock) \ |
do { cpu_relax(); } while (__raw_spin_is_locked(lock)) |
#define arch_spin_unlock_wait(lock) \ |
do { cpu_relax(); } while (arch_spin_is_locked(lock)) |
#endif /* __LINUX_SPINLOCK_UP_H */ |
/drivers/include/linux/uuid.h |
---|
17,42 → 17,23 |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
*/ |
#ifndef _LINUX_UUID_H_ |
#define _LINUX_UUID_H_ |
#ifndef _UAPI_LINUX_UUID_H_ |
#define _UAPI_LINUX_UUID_H_ |
#include <uapi/linux/uuid.h> |
#include <linux/types.h> |
#include <linux/string.h> |
typedef struct { |
__u8 b[16]; |
} uuid_le; |
static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2) |
{ |
return memcmp(&u1, &u2, sizeof(uuid_le)); |
} |
typedef struct { |
__u8 b[16]; |
} uuid_be; |
static inline int uuid_be_cmp(const uuid_be u1, const uuid_be u2) |
{ |
return memcmp(&u1, &u2, sizeof(uuid_be)); |
} |
#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ |
((uuid_le) \ |
{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ |
(b) & 0xff, ((b) >> 8) & 0xff, \ |
(c) & 0xff, ((c) >> 8) & 0xff, \ |
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) |
extern void uuid_le_gen(uuid_le *u); |
extern void uuid_be_gen(uuid_be *u); |
#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ |
((uuid_be) \ |
{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ |
((b) >> 8) & 0xff, (b) & 0xff, \ |
((c) >> 8) & 0xff, (c) & 0xff, \ |
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) |
#define NULL_UUID_LE \ |
UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \ |
0x00, 0x00, 0x00, 0x00) |
#define NULL_UUID_BE \ |
UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \ |
0x00, 0x00, 0x00, 0x00) |
#endif /* _UAPI_LINUX_UUID_H_ */ |
#endif |
/drivers/include/linux/workqueue.h |
---|
9,6 → 9,7 |
#include <linux/linkage.h> |
#include <linux/lockdep.h> |
#include <linux/threads.h> |
#include <linux/atomic.h> |
#include <syscall.h> |
struct workqueue_struct; |
/drivers/include/syscall.h |
---|
543,19 → 543,5 |
static inline int power_supply_is_system_supplied(void) { return -1; } |
#define RWSEM_UNLOCKED_VALUE 0x00000000 |
#define RWSEM_ACTIVE_BIAS 0x00000001 |
#define RWSEM_ACTIVE_MASK 0x0000ffff |
#define RWSEM_WAITING_BIAS (-0x00010000) |
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
//static void init_rwsem(struct rw_semaphore *sem) |
//{ |
// sem->count = RWSEM_UNLOCKED_VALUE; |
// spin_lock_init(&sem->wait_lock); |
// INIT_LIST_HEAD(&sem->wait_list); |
//} |
#endif |
/drivers/include/uapi/drm/drm.h |
---|
39,7 → 39,7 |
#if defined(__KERNEL__) || defined(__linux__) |
#include <linux/types.h> |
//#include <asm/ioctl.h> |
#include <asm/ioctl.h> |
typedef unsigned int drm_handle_t; |
#else /* One of the BSDs */ |
/drivers/include/uapi/linux/uuid.h |
---|
0,0 → 1,58 |
/* |
* UUID/GUID definition |
* |
* Copyright (C) 2010, Intel Corp. |
* Huang Ying <ying.huang@intel.com> |
* |
* This program is free software; you can redistribute it and/or |
* modify it under the terms of the GNU General Public License version |
* 2 as published by the Free Software Foundation; |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
*/ |
#ifndef _UAPI_LINUX_UUID_H_ |
#define _UAPI_LINUX_UUID_H_ |
#include <linux/types.h> |
#include <linux/string.h> |
typedef struct { |
__u8 b[16]; |
} uuid_le; |
typedef struct { |
__u8 b[16]; |
} uuid_be; |
#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ |
((uuid_le) \ |
{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ |
(b) & 0xff, ((b) >> 8) & 0xff, \ |
(c) & 0xff, ((c) >> 8) & 0xff, \ |
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) |
#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ |
((uuid_be) \ |
{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ |
((b) >> 8) & 0xff, (b) & 0xff, \ |
((c) >> 8) & 0xff, (c) & 0xff, \ |
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) |
#define NULL_UUID_LE \ |
UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \ |
0x00, 0x00, 0x00, 0x00) |
#define NULL_UUID_BE \ |
UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \ |
0x00, 0x00, 0x00, 0x00) |
#endif /* _UAPI_LINUX_UUID_H_ */ |
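The byte ordering the two macros encode can be checked in plain user space. This standalone sketch re-creates UUID_LE locally, with invented field values:

#include <stdio.h>

typedef struct { unsigned char b[16]; } uuid_le;	/* local stand-in */

#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)		\
((uuid_le)								\
{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
   (b) & 0xff, ((b) >> 8) & 0xff,					\
   (c) & 0xff, ((c) >> 8) & 0xff,					\
   (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})

int main(void)
{
	uuid_le u = UUID_LE(0x12345678, 0x9abc, 0xdef0,
			    0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77);

	for (int i = 0; i < 16; i++)
		printf("%02x ", u.b[i]);
	printf("\n");	/* 78 56 34 12 bc 9a f0 de 00 11 22 33 44 55 66 77 */
	return 0;
}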