Subversion Repositories Kolibri OS

Compare Revisions

Rev 5345 → Rev 6082

/drivers/include/linux/atomic.h
2,7 → 2,427
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>
 
/*
* Relaxed variants of xchg, cmpxchg and some atomic operations.
*
* We support four variants:
*
* - Fully ordered: The default implementation, no suffix required.
* - Acquire: Provides ACQUIRE semantics, _acquire suffix.
* - Release: Provides RELEASE semantics, _release suffix.
* - Relaxed: No ordering guarantees, _relaxed suffix.
*
* For compound atomics performing both a load and a store, ACQUIRE
* semantics apply only to the load and RELEASE semantics only to the
* store portion of the operation. Note that a failed cmpxchg_acquire
* does -not- imply any memory ordering constraints.
*
* See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
*/
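/*
 * Illustrative usage sketch: a minimal test-and-set lock built from
 * these variants. The example_lock()/example_unlock() helpers are
 * hypothetical and assume an atomic_t whose value 0 means "unlocked"
 * and that cpu_relax() (from <asm/processor.h>) is available. The
 * _acquire form orders the critical section after the successful swap;
 * the _release store publishes the writes made inside it before the
 * lock reads as free again.
 */
static inline void example_lock(atomic_t *lock)
{
	while (atomic_cmpxchg_acquire(lock, 0, 1) != 0)
		cpu_relax();		/* spin until the 0 -> 1 swap succeeds */
}

static inline void example_unlock(atomic_t *lock)
{
	atomic_set_release(lock, 0);	/* RELEASE: prior writes visible first */
}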
 
#ifndef atomic_read_acquire
#define atomic_read_acquire(v) smp_load_acquire(&(v)->counter)
#endif
 
#ifndef atomic_set_release
#define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i))
#endif
 
/*
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*/
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
smp_mb__after_atomic(); \
__ret; \
})
 
#define __atomic_op_release(op, args...) \
({ \
smp_mb__before_atomic(); \
op##_relaxed(args); \
})
 
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
smp_mb__before_atomic(); \
__ret = op##_relaxed(args); \
smp_mb__after_atomic(); \
__ret; \
})
 
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed atomic_add_return
#define atomic_add_return_acquire atomic_add_return
#define atomic_add_return_release atomic_add_return
 
#else /* atomic_add_return_relaxed */
 
#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...) \
__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif
 
#ifndef atomic_add_return_release
#define atomic_add_return_release(...) \
__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif
 
#ifndef atomic_add_return
#define atomic_add_return(...) \
__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */
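/*
 * Illustrative note: for an architecture that provides only
 * atomic_add_return_relaxed(), the block above supplies the missing
 * variants, expanding roughly to
 *
 *   atomic_add_return_acquire(i, v)
 *     -> ({ int __ret = atomic_add_return_relaxed(i, v);
 *           smp_mb__after_atomic();
 *           __ret; })
 *
 *   atomic_add_return(i, v)
 *     -> ({ int __ret;
 *           smp_mb__before_atomic();
 *           __ret = atomic_add_return_relaxed(i, v);
 *           smp_mb__after_atomic();
 *           __ret; })
 *
 * whereas an architecture that never defines the _relaxed name gets all
 * four spellings aliased to its fully ordered atomic_add_return().
 */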
 
/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
#define atomic_inc_return_relaxed atomic_inc_return
#define atomic_inc_return_acquire atomic_inc_return
#define atomic_inc_return_release atomic_inc_return
 
#else /* atomic_inc_return_relaxed */
 
#ifndef atomic_inc_return_acquire
#define atomic_inc_return_acquire(...) \
__atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
#endif
 
#ifndef atomic_inc_return_release
#define atomic_inc_return_release(...) \
__atomic_op_release(atomic_inc_return, __VA_ARGS__)
#endif
 
#ifndef atomic_inc_return
#define atomic_inc_return(...) \
__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
#endif /* atomic_inc_return_relaxed */
 
/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return
#define atomic_sub_return_acquire atomic_sub_return
#define atomic_sub_return_release atomic_sub_return
 
#else /* atomic_sub_return_relaxed */
 
#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...) \
__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif
 
#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...) \
__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif
 
#ifndef atomic_sub_return
#define atomic_sub_return(...) \
__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */
 
/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return
#define atomic_dec_return_acquire atomic_dec_return
#define atomic_dec_return_release atomic_dec_return
 
#else /* atomic_dec_return_relaxed */
 
#ifndef atomic_dec_return_acquire
#define atomic_dec_return_acquire(...) \
__atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
#endif
 
#ifndef atomic_dec_return_release
#define atomic_dec_return_release(...) \
__atomic_op_release(atomic_dec_return, __VA_ARGS__)
#endif
 
#ifndef atomic_dec_return
#define atomic_dec_return(...) \
__atomic_op_fence(atomic_dec_return, __VA_ARGS__)
#endif
#endif /* atomic_dec_return_relaxed */
 
/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed atomic_xchg
#define atomic_xchg_acquire atomic_xchg
#define atomic_xchg_release atomic_xchg
 
#else /* atomic_xchg_relaxed */
 
#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...) \
__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif
 
#ifndef atomic_xchg_release
#define atomic_xchg_release(...) \
__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif
 
#ifndef atomic_xchg
#define atomic_xchg(...) \
__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */
 
/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed atomic_cmpxchg
#define atomic_cmpxchg_acquire atomic_cmpxchg
#define atomic_cmpxchg_release atomic_cmpxchg
 
#else /* atomic_cmpxchg_relaxed */
 
#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...) \
__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif
 
#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...) \
__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif
 
#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...) \
__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */
 
#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter)
#endif
 
#ifndef atomic64_set_release
#define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i))
#endif
 
/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed atomic64_add_return
#define atomic64_add_return_acquire atomic64_add_return
#define atomic64_add_return_release atomic64_add_return
 
#else /* atomic64_add_return_relaxed */
 
#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...) \
__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...) \
__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_add_return
#define atomic64_add_return(...) \
__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */
 
/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
#define atomic64_inc_return_relaxed atomic64_inc_return
#define atomic64_inc_return_acquire atomic64_inc_return
#define atomic64_inc_return_release atomic64_inc_return
 
#else /* atomic64_inc_return_relaxed */
 
#ifndef atomic64_inc_return_acquire
#define atomic64_inc_return_acquire(...) \
__atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_inc_return_release
#define atomic64_inc_return_release(...) \
__atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_inc_return
#define atomic64_inc_return(...) \
__atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif /* atomic64_inc_return_relaxed */
 
 
/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return
#define atomic64_sub_return_acquire atomic64_sub_return
#define atomic64_sub_return_release atomic64_sub_return
 
#else /* atomic64_sub_return_relaxed */
 
#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...) \
__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...) \
__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_sub_return
#define atomic64_sub_return(...) \
__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */
 
/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return
#define atomic64_dec_return_acquire atomic64_dec_return
#define atomic64_dec_return_release atomic64_dec_return
 
#else /* atomic64_dec_return_relaxed */
 
#ifndef atomic64_dec_return_acquire
#define atomic64_dec_return_acquire(...) \
__atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_dec_return_release
#define atomic64_dec_return_release(...) \
__atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif
 
#ifndef atomic64_dec_return
#define atomic64_dec_return(...) \
__atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif /* atomic64_dec_return_relaxed */
 
/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed atomic64_xchg
#define atomic64_xchg_acquire atomic64_xchg
#define atomic64_xchg_release atomic64_xchg
 
#else /* atomic64_xchg_relaxed */
 
#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...) \
__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif
 
#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...) \
__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif
 
#ifndef atomic64_xchg
#define atomic64_xchg(...) \
__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */
 
/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
#define atomic64_cmpxchg_acquire atomic64_cmpxchg
#define atomic64_cmpxchg_release atomic64_cmpxchg
 
#else /* atomic64_cmpxchg_relaxed */
 
#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...) \
__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif
 
#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...) \
__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif
 
#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...) \
__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */
 
/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed cmpxchg
#define cmpxchg_acquire cmpxchg
#define cmpxchg_release cmpxchg
 
#else /* cmpxchg_relaxed */
 
#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...) \
__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif
 
#ifndef cmpxchg_release
#define cmpxchg_release(...) \
__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif
 
#ifndef cmpxchg
#define cmpxchg(...) \
__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */
 
/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed cmpxchg64
#define cmpxchg64_acquire cmpxchg64
#define cmpxchg64_release cmpxchg64
 
#else /* cmpxchg64_relaxed */
 
#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...) \
__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif
 
#ifndef cmpxchg64_release
#define cmpxchg64_release(...) \
__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif
 
#ifndef cmpxchg64
#define cmpxchg64(...) \
__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */
 
/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed xchg
#define xchg_acquire xchg
#define xchg_release xchg
 
#else /* xchg_relaxed */
 
#ifndef xchg_acquire
#define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__)
#endif
 
#ifndef xchg_release
#define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__)
#endif
 
#ifndef xchg
#define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */
 
/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
28,6 → 448,23
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
 
#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
atomic_and(~i, v);
}
#endif
 
static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
atomic_andnot(mask, v);
}
 
static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
atomic_or(mask, v);
}
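/*
 * Illustrative usage sketch: the deprecated mask helpers above are thin
 * wrappers over atomic_or() and atomic_andnot(). With a hypothetical
 * EXAMPLE_BUSY bit in an atomic_t "flags":
 *
 *   atomic_set_mask(EXAMPLE_BUSY, &flags);    sets the bit:   flags |=  EXAMPLE_BUSY
 *   atomic_clear_mask(EXAMPLE_BUSY, &flags);  clears the bit: flags &= ~EXAMPLE_BUSY
 *
 * New code should call atomic_or() / atomic_andnot() directly.
 */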
 
/**
* atomic_inc_not_zero_hint - increment if not zero
* @v: pointer of type atomic_t
111,21 → 548,17
}
#endif
 
#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
static inline void atomic_or(int i, atomic_t *v)
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
 
#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
int old;
int new;
 
do {
old = atomic_read(v);
new = old | i;
} while (atomic_cmpxchg(v, old, new) != old);
atomic64_and(~i, v);
}
#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */
#endif
 
#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
 
#endif /* _LINUX_ATOMIC_H */
/drivers/include/linux/bitmap.h
52,16 → 52,13
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
* bitmap_fold(dst, orig, sz, nbits) dst bits = orig bits mod sz
* bitmap_scnprintf(buf, len, src, nbits) Print bitmap src to buf
* bitmap_parse(buf, buflen, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parse_user(ubuf, ulen, dst, nbits) Parse bitmap dst from user buf
* bitmap_scnlistprintf(buf, len, src, nbits) Print bitmap src as list to buf
* bitmap_parselist(buf, dst, nbits) Parse bitmap dst from kernel buf
* bitmap_parselist_user(buf, dst, nbits) Parse bitmap dst from user buf
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex
*/
 
/*
96,10 → 93,10
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
unsigned int nbits);
extern void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
147,59 → 144,55
align_mask, 0);
}
 
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern int bitmap_scnlistprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int bitmap_parselist(const char *buf, unsigned long *maskp,
int nmaskbits);
extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, int bits);
const unsigned long *old, const unsigned long *new, unsigned int nbits);
extern int bitmap_bitremap(int oldbit,
const unsigned long *old, const unsigned long *new, int bits);
extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, int bits);
const unsigned long *relmap, unsigned int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
int sz, int bits);
unsigned int sz, unsigned int nbits);
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
#ifdef __BIG_ENDIAN
extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
#else
#define bitmap_copy_le bitmap_copy
#endif
extern unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, unsigned int nbits);
extern int bitmap_print_to_pagebuf(bool list, char *buf,
const unsigned long *maskp, int nmaskbits);
 
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
#define BITMAP_LAST_WORD_MASK(nbits) \
( \
((nbits) % BITS_PER_LONG) ? \
(1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
)
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
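/*
 * Illustrative worked example: the branch-free BITMAP_LAST_WORD_MASK()
 * above relies on the identity, for 1 <= nbits <= BITS_PER_LONG,
 * -(nbits) & (BITS_PER_LONG - 1) == (BITS_PER_LONG - nbits) % BITS_PER_LONG.
 * With BITS_PER_LONG == 32:
 *
 *   nbits == 5   ->  ~0UL >> 27  ==  0x0000001f   (low five bits)
 *   nbits == 32  ->  ~0UL >> 0   ==  0xffffffff   (whole word)
 *
 * so the "nbits % BITS_PER_LONG == 0" special case of the old ?:
 * definition is no longer needed.
 */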
 
#define small_const_nbits(nbits) \
(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
 
static inline void bitmap_zero(unsigned long *dst, int nbits)
static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = 0UL;
else {
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memset(dst, 0, len);
}
}
 
static inline void bitmap_fill(unsigned long *dst, int nbits)
static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
size_t nlongs = BITS_TO_LONGS(nbits);
unsigned int nlongs = BITS_TO_LONGS(nbits);
if (!small_const_nbits(nbits)) {
int len = (nlongs - 1) * sizeof(unsigned long);
unsigned int len = (nlongs - 1) * sizeof(unsigned long);
memset(dst, 0xff, len);
}
dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
206,12 → 199,12
}
 
static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
int nbits)
unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src;
else {
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
memcpy(dst, src, len);
}
}
290,8 → 283,8
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_empty(src, nbits);
 
return find_first_bit(src, nbits) == nbits;
}
 
static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
298,11 → 291,11
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
else
return __bitmap_full(src, nbits);
 
return find_first_zero_bit(src, nbits) == nbits;
}
 
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
static __always_inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
309,22 → 302,22
return __bitmap_weight(src, nbits);
}
 
static inline void bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int n, int nbits)
static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n;
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
else
__bitmap_shift_right(dst, src, n, nbits);
__bitmap_shift_right(dst, src, shift, nbits);
}
 
static inline void bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int n, int nbits)
static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = (*src << n) & BITMAP_LAST_WORD_MASK(nbits);
*dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
else
__bitmap_shift_left(dst, src, n, nbits);
__bitmap_shift_left(dst, src, shift, nbits);
}
 
static inline int bitmap_parse(const char *buf, unsigned int buflen,
/drivers/include/linux/bitops.h
57,7 → 57,7
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
 
static __inline__ int get_bitmask_order(unsigned int count)
static inline int get_bitmask_order(unsigned int count)
{
int order;
 
65,7 → 65,7
return order; /* We could be slightly more clever with -1 here... */
}
 
static __inline__ int get_count_order(unsigned int count)
static inline int get_count_order(unsigned int count)
{
int order;
 
75,7 → 75,7
return order;
}
 
static inline unsigned long hweight_long(unsigned long w)
static __always_inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}
164,6 → 164,8
* sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<32) to sign bit
*
* This is safe to use for 16- and 8-bit types as well.
*/
static inline __s32 sign_extend32(__u32 value, int index)
{
171,6 → 173,17
return (__s32)(value << shift) >> shift;
}
 
/**
* sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
* @value: value to sign extend
* @index: 0 based bit index (0<=index<64) to sign bit
*/
static inline __s64 sign_extend64(__u64 value, int index)
{
__u8 shift = 63 - index;
return (__s64)(value << shift) >> shift;
}
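/*
 * Illustrative worked example: with bit 7 chosen as the sign bit,
 *
 *   sign_extend32(0x80, 7) == -128
 *   sign_extend32(0x7f, 7) ==  127
 *
 * because the value is shifted left by 31 - index and then shifted back
 * arithmetically, replicating the chosen bit into all upper bits.
 */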
 
static inline unsigned fls_long(unsigned long l)
{
if (sizeof(l) == 4)
218,9 → 231,9
/**
* find_last_bit - find the last set bit in a memory region
* @addr: The address to start the search at
* @size: The maximum size to search
* @size: The number of bits to search
*
* Returns the bit number of the first set bit, or size.
* Returns the bit number of the last set bit, or size.
*/
extern unsigned long find_last_bit(const unsigned long *addr,
unsigned long size);
/drivers/include/linux/bottom_half.h
2,7 → 2,6
#define _LINUX_BH_H
 
#include <linux/preempt.h>
#include <linux/preempt_mask.h>
 
#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
/drivers/include/linux/circ_buf.h
0,0 → 1,36
/*
* See Documentation/circular-buffers.txt for more information.
*/
 
#ifndef _LINUX_CIRC_BUF_H
#define _LINUX_CIRC_BUF_H 1
 
struct circ_buf {
char *buf;
int head;
int tail;
};
 
/* Return count in buffer. */
#define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1))
 
/* Return space available, 0..size-1. We always leave one free char
as a completely full buffer has head == tail, which is the same as
empty. */
#define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size))
 
/* Return count up to the end of the buffer. Carefully avoid
accessing head and tail more than once, so they can change
underneath us without returning inconsistent results. */
#define CIRC_CNT_TO_END(head,tail,size) \
({int end = (size) - (tail); \
int n = ((head) + end) & ((size)-1); \
n < end ? n : end;})
 
/* Return space available up to the end of the buffer. */
#define CIRC_SPACE_TO_END(head,tail,size) \
({int end = (size) - 1 - (head); \
int n = (end + (tail)) & ((size)-1); \
n <= end ? n : end+1;})
 
#endif /* _LINUX_CIRC_BUF_H */
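/*
 * Illustrative usage sketch: a minimal single-producer / single-consumer
 * byte queue over struct circ_buf. EXAMPLE_SIZE and the example_*
 * helpers are hypothetical; the size must be a power of two, the
 * producer only advances head and the consumer only advances tail.
 */
#define EXAMPLE_SIZE 256

static inline int example_put(struct circ_buf *c, char ch)
{
	if (CIRC_SPACE(c->head, c->tail, EXAMPLE_SIZE) == 0)
		return -1;				/* full */
	c->buf[c->head] = ch;
	c->head = (c->head + 1) & (EXAMPLE_SIZE - 1);
	return 0;
}

static inline int example_get(struct circ_buf *c, char *ch)
{
	if (CIRC_CNT(c->head, c->tail, EXAMPLE_SIZE) == 0)
		return -1;				/* empty */
	*ch = c->buf[c->tail];
	c->tail = (c->tail + 1) & (EXAMPLE_SIZE - 1);
	return 0;
}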
/drivers/include/linux/compiler-gcc.h
9,10 → 9,24
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
 
/* Optimization barrier */
 
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
/*
* This version is used e.g. to prevent dead-store elimination on @ptr,
* where gcc and llvm may behave differently when otherwise using a
* normal barrier(): while gcc gets along with a normal barrier(), llvm
* needs an explicit input variable to be assumed clobbered. The issue
* is as follows: while the inline asm might access any memory it wants,
* the compiler could have fit all of @ptr into registers instead, and
* since @ptr never escaped from them, it proved that the inline asm
* wasn't touching any of it. This version works with both compilers,
* i.e. we're telling the compiler that the inline asm absolutely may
* see the contents of @ptr.
* See also: https://llvm.org/bugs/show_bug.cgi?id=15495
*/
#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
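/*
 * Illustrative usage sketch: a typical use of barrier_data() is wiping
 * a sensitive buffer, where a plain memset() could otherwise be
 * discarded as a dead store (the example_wipe() helper is hypothetical):
 *
 *   static void example_wipe(char *key, size_t len)
 *   {
 *       memset(key, 0, len);
 *       barrier_data(key);   // asm is assumed to read key, so the memset stays
 *   }
 */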
 
/*
* This macro obfuscates arithmetic on a variable address so that gcc
33,15 → 47,18
* case either is valid.
*/
#define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
({ \
unsigned long __ptr; \
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
(typeof(ptr)) (__ptr + (off)); })
(typeof(ptr)) (__ptr + (off)); \
})
 
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
#define OPTIMIZER_HIDE_VAR(var) \
__asm__ ("" : "=r" (var) : "0" (var))
 
#ifdef __CHECKER__
#define __must_be_array(arr) 0
#define __must_be_array(a) 0
#else
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
63,19 → 80,25
# define __inline __inline notrace
#endif
 
#define __always_inline inline __attribute__((always_inline))
#define noinline __attribute__((noinline))
 
#define __deprecated __attribute__((deprecated))
#define __packed __attribute__((packed))
#define __weak __attribute__((weak))
#define __alias(symbol) __attribute__((alias(#symbol)))
 
/*
* it doesn't make sense on ARM (currently the only user of __naked) to trace
* naked functions because then mcount is called without stack and frame pointer
* being set up and there is no chance to restore the lr register to the value
* before mcount was called.
* it doesn't make sense on ARM (currently the only user of __naked)
* to trace naked functions because then mcount is called without
* stack and frame pointer being set up and there is no chance to
* restore the lr register to the value before mcount was called.
*
* The asm() bodies of naked functions often depend on standard calling conventions,
* therefore they must be noinline and noclone. GCC 4.[56] currently fail to enforce
* this, so we must do so ourselves. See GCC PR44290.
* The asm() bodies of naked functions often depend on standard calling
* conventions, therefore they must be noinline and noclone.
*
* GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
* See GCC PR44290.
*/
#define __naked __attribute__((naked)) noinline __noclone notrace
 
95,24 → 118,166
#define __aligned(x) __attribute__((aligned(x)))
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
#define noinline __attribute__((noinline))
#define __attribute_const__ __attribute__((__const__))
#define __maybe_unused __attribute__((unused))
#define __always_unused __attribute__((unused))
 
#define __gcc_header(x) #x
#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
#define gcc_header(x) _gcc_header(x)
#include gcc_header(__GNUC__)
/* gcc version specific checks */
 
#if GCC_VERSION < 30200
# error Sorry, your compiler is too old - please upgrade it.
#endif
 
#if GCC_VERSION < 30300
# define __used __attribute__((__unused__))
#else
# define __used __attribute__((__used__))
#endif
 
#ifdef CONFIG_GCOV_KERNEL
# if GCC_VERSION < 30400
# error "GCOV profiling support for gcc versions below 3.4 not included"
# endif /* __GNUC_MINOR__ */
#endif /* CONFIG_GCOV_KERNEL */
 
#if GCC_VERSION >= 30400
#define __must_check __attribute__((warn_unused_result))
#endif
 
#if GCC_VERSION >= 40000
 
/* GCC 4.1.[01] miscompiles __weak */
#ifdef __KERNEL__
# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
# error Your version of gcc miscompiles the __weak directive
# endif
#endif
 
#define __used __attribute__((__used__))
#define __compiler_offsetof(a, b) \
__builtin_offsetof(a, b)
 
#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
 
#if GCC_VERSION >= 40300
/* Mark functions as cold. gcc will assume any path leading to a call
* to them will be unlikely. This means a lot of manual unlikely()s
* are unnecessary now for any paths leading to the usual suspects
* like BUG(), printk(), panic() etc. [but let's keep them for now for
* older compilers]
*
* Early snapshots of gcc 4.3 don't support this and we can't detect this
* in the preprocessor, but we can live with this because they're unreleased.
* Maketime probing would be overkill here.
*
* gcc also has a __attribute__((__hot__)) to move hot functions into
* a special section, but I don't see any sense in this right now in
* the kernel context
*/
#define __cold __attribute__((__cold__))
 
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
 
#ifndef __CHECKER__
# define __compiletime_warning(message) __attribute__((warning(message)))
# define __compiletime_error(message) __attribute__((error(message)))
#endif /* __CHECKER__ */
#endif /* GCC_VERSION >= 40300 */
 
#if GCC_VERSION >= 40500
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
* control elsewhere.
*
* Early snapshots of gcc 4.5 don't support this and we can't detect
* this in the preprocessor, but we can live with this because they're
* unreleased. Really, we need to have autoconf for the kernel.
*/
#define unreachable() __builtin_unreachable()
 
/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__))
 
#endif /* GCC_VERSION >= 40500 */
 
#if GCC_VERSION >= 40600
/*
* When used with Link Time Optimization, gcc can optimize away C functions or
* variables which are referenced only from assembly code. __visible tells the
* optimizer that something else uses this function or variable, thus preventing
* this.
*/
#define __visible __attribute__((externally_visible))
#endif
 
 
#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
/*
* __assume_aligned(n, k): Tell the optimizer that the returned
* pointer can be assumed to be k modulo n. The second argument is
* optional (default 0), so we use a variadic macro to make the
* shorthand.
*
* Beware: Do not apply this to functions which may return
* ERR_PTRs. Also, it is probably unwise to apply it to functions
* returning extra information in the low bits (but in that case the
* compiler should see some alignment anyway, when the return value is
* massaged by 'flags = ptr & 3; ptr &= ~3;').
*/
#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
#endif
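/*
 * Illustrative usage sketch: a hypothetical allocator wrapper whose
 * return value is promised to be 64-byte aligned, letting the optimizer
 * assume that alignment at the call sites:
 *
 *   void *example_alloc64(size_t n) __assume_aligned(64);
 *
 * and, with the optional second argument, "aligned to 64 with offset 8":
 *
 *   void *example_alloc64_off8(size_t n) __assume_aligned(64, 8);
 */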
 
/*
* GCC 'asm goto' miscompiles certain code sequences:
*
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
*
* Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
*
* (asm goto is automatically volatile - the naming reflects this.)
*/
#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
 
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if GCC_VERSION >= 40400
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#endif
#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
 
#if GCC_VERSION >= 50000
#define KASAN_ABI_VERSION 4
#elif GCC_VERSION >= 40902
#define KASAN_ABI_VERSION 3
#endif
 
#if GCC_VERSION >= 40902
/*
* Tell the compiler that address safety instrumentation (KASAN)
* should not be applied to that function.
* Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
*/
#define __no_sanitize_address __attribute__((no_sanitize_address))
#endif
 
#endif /* gcc version >= 40000 specific checks */
 
#if !defined(__noclone)
#define __noclone /* not needed */
#endif
 
#if !defined(__no_sanitize_address)
#define __no_sanitize_address
#endif
 
/*
* A trick to suppress uninitialized variable warning without generating any
* code
*/
#define uninitialized_var(x) x = x
 
#define __always_inline inline __attribute__((always_inline))
/drivers/include/linux/compiler.h
17,6 → 17,7
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
#else
42,6 → 43,7
# define __cond_lock(x,c) (c)
# define __percpu
# define __rcu
# define __pmem
#endif
 
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
54,7 → 56,11
#include <linux/compiler-gcc.h>
#endif
 
#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
#define notrace __attribute__((hotpatch(0,0)))
#else
#define notrace __attribute__((no_instrument_function))
#endif
 
/* Intel compiler defines __GNUC__. So we will overwrite implementations
* coming from above header files here
165,6 → 171,10
# define barrier() __memory_barrier()
#endif
 
#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
 
/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
188,46 → 198,56
 
#include <uapi/linux/types.h>
 
static __always_inline void data_access_exceeds_word_size(void)
#ifdef __compiletime_warning
__compiletime_warning("data access exceeds word size and won't be atomic")
#endif
;
#define __READ_ONCE_SIZE \
({ \
switch (size) { \
case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \
case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \
case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \
case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \
default: \
barrier(); \
__builtin_memcpy((void *)res, (const void *)p, size); \
barrier(); \
} \
})
 
static __always_inline void data_access_exceeds_word_size(void)
static __always_inline
void __read_once_size(const volatile void *p, void *res, int size)
{
__READ_ONCE_SIZE;
}
 
static __always_inline void __read_once_size(volatile void *p, void *res, int size)
#ifdef CONFIG_KASAN
/*
* This function is not 'inline' because __no_sanitize_address conflicts
* with inlining. Attempting to inline it may cause a build failure.
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
static __no_sanitize_address __maybe_unused
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
#ifdef CONFIG_64BIT
case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
#endif
default:
barrier();
__builtin_memcpy((void *)res, (const void *)p, size);
data_access_exceeds_word_size();
barrier();
__READ_ONCE_SIZE;
}
#else
static __always_inline
void __read_once_size_nocheck(const volatile void *p, void *res, int size)
{
__READ_ONCE_SIZE;
}
#endif
 
static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
#ifdef CONFIG_64BIT
case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
#endif
default:
barrier();
__builtin_memcpy((void *)p, (const void *)res, size);
data_access_exceeds_word_size();
barrier();
}
}
235,15 → 255,15
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
* READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
* READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
* compiler is aware of some particular ordering. One way to make the
* compiler aware of ordering is to put the two invocations of READ_ONCE,
* ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
* WRITE_ONCE or ACCESS_ONCE() in different C statements.
*
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
* READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
* READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
* compile-time warning.
*
* Their two major use cases are: (1) Mediating communication between
254,12 → 274,31
* required ordering.
*/
 
#define READ_ONCE(x) \
({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
#define __READ_ONCE(x, check) \
({ \
union { typeof(x) __val; char __c[1]; } __u; \
if (check) \
__read_once_size(&(x), __u.__c, sizeof(x)); \
else \
__read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \
__u.__val; \
})
#define READ_ONCE(x) __READ_ONCE(x, 1)
 
#define ASSIGN_ONCE(val, x) \
({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
/*
* Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need
* to hide memory access from KASAN.
*/
#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0)
 
#define WRITE_ONCE(x, val) \
({ \
union { typeof(x) __val; char __c[1]; } __u = \
{ .__val = (__force typeof(x)) (val) }; \
__write_once_size(&(x), __u.__c, sizeof(x)); \
__u.__val; \
})
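/*
 * Illustrative usage sketch: a one-writer / one-reader "payload then
 * flag" handoff. The example_* helpers are hypothetical and assume the
 * smp_wmb()/smp_rmb() barriers from <asm/barrier.h> are available.
 * WRITE_ONCE()/READ_ONCE() keep the compiler from tearing, caching or
 * reordering the marked accesses; the barrier pair orders the flag
 * against the payload.
 */
static inline void example_publish(int *payload, int *ready, int value)
{
	*payload = value;
	smp_wmb();			/* payload before flag */
	WRITE_ONCE(*ready, 1);
}

static inline int example_try_consume(int *payload, int *ready, int *out)
{
	if (!READ_ONCE(*ready))
		return 0;
	smp_rmb();			/* flag before payload */
	*out = *payload;
	return 1;
}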
 
#endif /* __KERNEL__ */
 
#endif /* __ASSEMBLY__ */
378,6 → 417,14
#define __visible
#endif
 
/*
* Assume alignment of return value.
*/
#ifndef __assume_aligned
#define __assume_aligned(a, ...)
#endif
 
 
/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
385,7 → 432,7
 
/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
 
/* Compile time object size, -1 for unknown */
447,13 → 494,39
* to make the compiler aware of ordering is to put the two invocations of
* ACCESS_ONCE() in different C statements.
*
* This macro does absolutely -nothing- to prevent the CPU from reordering,
* merging, or refetching absolutely anything at any time. Its main intended
* use is to mediate communication between process-level code and irq/NMI
* handlers, all running on the same CPU.
* ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
* on a union member will work as long as the size of the member matches the
* size of the union and the size is smaller than word size.
*
* The major use cases of ACCESS_ONCE used to be (1) Mediating communication
* between process-level code and irq/NMI handlers, all running on the same CPU,
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* mutilate accesses that either do not require ordering or that interact
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*
* If possible use READ_ONCE()/WRITE_ONCE() instead.
*/
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
#define __ACCESS_ONCE(x) ({ \
__maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
(volatile typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
 
/**
* lockless_dereference() - safely load a pointer for later dereference
* @p: The pointer to load
*
* Similar to rcu_dereference(), but for situations where the pointed-to
* object's lifetime is managed by something other than RCU. That
* "something other" might be reference counting or simple immortality.
*/
#define lockless_dereference(p) \
({ \
typeof(p) _________p1 = READ_ONCE(p); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
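/*
 * Illustrative usage sketch: reading a pointer that is published once
 * and never freed, e.g. a hypothetical table installed during early
 * init:
 *
 *   struct example_table *t = lockless_dereference(example_table);
 *   if (t)
 *       use(t->entries);
 *
 * The embedded smp_read_barrier_depends() orders the dependent loads
 * (needed on Alpha) without requiring the object's lifetime to be
 * managed by RCU.
 */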
 
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes __attribute__((__section__(".kprobes.text")))
/drivers/include/linux/component.h
0,0 → 1,39
#ifndef COMPONENT_H
#define COMPONENT_H
 
struct device;
 
struct component_ops {
int (*bind)(struct device *, struct device *, void *);
void (*unbind)(struct device *, struct device *, void *);
};
 
int component_add(struct device *, const struct component_ops *);
void component_del(struct device *, const struct component_ops *);
 
int component_bind_all(struct device *, void *);
void component_unbind_all(struct device *, void *);
 
struct master;
 
struct component_master_ops {
int (*add_components)(struct device *, struct master *);
int (*bind)(struct device *);
void (*unbind)(struct device *);
};
 
int component_master_add(struct device *, const struct component_master_ops *);
void component_master_del(struct device *,
const struct component_master_ops *);
 
int component_master_add_child(struct master *master,
int (*compare)(struct device *, void *), void *compare_data);
 
struct component_match;
 
int component_master_add_with_match(struct device *,
const struct component_master_ops *, struct component_match *);
void component_match_add(struct device *, struct component_match **,
int (*compare)(struct device *, void *), void *compare_data);
 
#endif
/drivers/include/linux/cpumask.h
11,6 → 11,7
#include <linux/bitmap.h>
#include <linux/bug.h>
 
/* Don't assign or return these: may not be this big! */
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
 
/**
22,6 → 23,14
*/
#define cpumask_bits(maskp) ((maskp)->bits)
 
/**
* cpumask_pr_args - printf args to output a cpumask
* @maskp: cpumask to be printed
*
* Can be used to provide arguments for '%*pb[l]' when printing a cpumask.
*/
#define cpumask_pr_args(maskp) nr_cpu_ids, cpumask_bits(maskp)
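/*
 * Illustrative usage sketch: printing a cpumask with the '%*pb' (hex)
 * or '%*pbl' (list) printf format extensions:
 *
 *   pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));
 *
 * which might print "online CPUs: 0-3" on a four-CPU system.
 */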
 
#if NR_CPUS == 1
#define nr_cpu_ids 1
#else
142,10 → 151,8
return 1;
}
 
static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
static inline unsigned int cpumask_local_spread(unsigned int i, int node)
{
set_bit(0, cpumask_bits(dstp));
 
return 0;
}
 
199,7 → 206,7
 
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
unsigned int cpumask_local_spread(unsigned int i, int node);
 
/**
* for_each_cpu - iterate over every cpu in a mask
281,11 → 288,11
* @cpumask: the cpumask pointer
*
* Returns 1 if @cpu is set in @cpumask, else returns 0
*
* No static inline type checking - see Subtlety (1) above.
*/
#define cpumask_test_cpu(cpu, cpumask) \
test_bit(cpumask_check(cpu), cpumask_bits((cpumask)))
static inline int cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
return test_bit(cpumask_check(cpu), cpumask_bits((cpumask)));
}
 
/**
* cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
539,21 → 546,6
#define cpumask_of(cpu) (get_cpu_mask(cpu))
 
/**
* cpumask_scnprintf - print a cpumask into a string as comma-separated hex
* @buf: the buffer to sprintf into
* @len: the length of the buffer
* @srcp: the cpumask to print
*
* If len is zero, returns zero. Otherwise returns the length of the
* (nul-terminated) @buf string.
*/
static inline int cpumask_scnprintf(char *buf, int len,
const struct cpumask *srcp)
{
return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits);
}
 
/**
* cpumask_parse_user - extract a cpumask from a user string
* @buf: the buffer to extract from
* @len: the length of the buffer
564,7 → 556,7
static inline int cpumask_parse_user(const char __user *buf, int len,
struct cpumask *dstp)
{
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
}
 
/**
579,26 → 571,10
struct cpumask *dstp)
{
return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
nr_cpumask_bits);
nr_cpu_ids);
}
 
/**
* cpulist_scnprintf - print a cpumask into a string as comma-separated list
* @buf: the buffer to sprintf into
* @len: the length of the buffer
* @srcp: the cpumask to print
*
* If len is zero, returns zero. Otherwise returns the length of the
* (nul-terminated) @buf string.
*/
static inline int cpulist_scnprintf(char *buf, int len,
const struct cpumask *srcp)
{
return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp),
nr_cpumask_bits);
}
 
/**
* cpumask_parse - extract a cpumask from a string
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
610,7 → 586,7
char *nl = strchr(buf, '\n');
unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
 
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
}
 
/**
622,7 → 598,7
*/
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
}
 
/**
632,9 → 608,7
*/
static inline size_t cpumask_size(void)
{
/* FIXME: Once all cpumask assignments are eliminated, this
* can be nr_cpumask_bits */
return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
}
 
/*
791,7 → 765,7
#if NR_CPUS <= BITS_PER_LONG
#define CPU_BITS_ALL \
{ \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
 
#else /* NR_CPUS > BITS_PER_LONG */
799,7 → 773,7
#define CPU_BITS_ALL \
{ \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
}
#endif /* NR_CPUS > BITS_PER_LONG */
 
817,36 → 791,22
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
{
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
nr_cpumask_bits);
nr_cpu_ids);
}
 
/*
*
* From here down, all obsolete. Use cpumask_ variants!
*
*/
#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu))
 
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS)
 
#if NR_CPUS <= BITS_PER_LONG
 
#define CPU_MASK_ALL \
(cpumask_t) { { \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
 
#else
 
#define CPU_MASK_ALL \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
[BITS_TO_LONGS(NR_CPUS)-1] = BITMAP_LAST_WORD_MASK(NR_CPUS) \
} }
#endif /* NR_CPUS > BITS_PER_LONG */
 
#endif
 
#define CPU_MASK_NONE \
(cpumask_t) { { \
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
857,143 → 817,4
[0] = 1UL \
} }
 
#if NR_CPUS == 1
#define first_cpu(src) ({ (void)(src); 0; })
#define next_cpu(n, src) ({ (void)(src); 1; })
#define any_online_cpu(mask) 0
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#else /* NR_CPUS > 1 */
int __first_cpu(const cpumask_t *srcp);
int __next_cpu(int n, const cpumask_t *srcp);
 
#define first_cpu(src) __first_cpu(&(src))
#define next_cpu(n, src) __next_cpu((n), &(src))
#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask)
#define for_each_cpu_mask(cpu, mask) \
for ((cpu) = -1; \
(cpu) = next_cpu((cpu), (mask)), \
(cpu) < NR_CPUS; )
#endif /* SMP */
 
#if NR_CPUS <= 64
 
#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask)
 
#else /* NR_CPUS > 64 */
 
int __next_cpu_nr(int n, const cpumask_t *srcp);
#define for_each_cpu_mask_nr(cpu, mask) \
for ((cpu) = -1; \
(cpu) = __next_cpu_nr((cpu), &(mask)), \
(cpu) < nr_cpu_ids; )
 
#endif /* NR_CPUS > 64 */
 
#define cpus_addr(src) ((src).bits)
 
#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
{
set_bit(cpu, dstp->bits);
}
 
#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst))
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp)
{
clear_bit(cpu, dstp->bits);
}
 
#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS)
static inline void __cpus_setall(cpumask_t *dstp, int nbits)
{
bitmap_fill(dstp->bits, nbits);
}
 
#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS)
static inline void __cpus_clear(cpumask_t *dstp, int nbits)
{
bitmap_zero(dstp->bits, nbits);
}
 
/* No static inline type checking - see Subtlety (1) above. */
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits)
 
#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask))
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr)
{
return test_and_set_bit(cpu, addr->bits);
}
 
#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits);
}
 
#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits);
}
 
#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS)
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits);
}
 
#define cpus_andnot(dst, src1, src2) \
__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS)
static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits);
}
 
#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS)
static inline int __cpus_equal(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_equal(src1p->bits, src2p->bits, nbits);
}
 
#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS)
static inline int __cpus_intersects(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_intersects(src1p->bits, src2p->bits, nbits);
}
 
#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS)
static inline int __cpus_subset(const cpumask_t *src1p,
const cpumask_t *src2p, int nbits)
{
return bitmap_subset(src1p->bits, src2p->bits, nbits);
}
 
#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS)
static inline int __cpus_empty(const cpumask_t *srcp, int nbits)
{
return bitmap_empty(srcp->bits, nbits);
}
 
#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS)
static inline int __cpus_weight(const cpumask_t *srcp, int nbits)
{
return bitmap_weight(srcp->bits, nbits);
}
 
#define cpus_shift_left(dst, src, n) \
__cpus_shift_left(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_left(cpumask_t *dstp,
const cpumask_t *srcp, int n, int nbits)
{
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits);
}
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */
 
#endif /* __LINUX_CPUMASK_H */
/drivers/include/linux/device.h
0,0 → 1,99
/*
* device.h - generic, centralized driver model
*
* Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
* Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2008-2009 Novell Inc.
*
* This file is released under the GPLv2
*
* See Documentation/driver-model/ for more information.
*/
 
#ifndef _DEVICE_H_
#define _DEVICE_H_
 
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/mutex.h>
struct device;
enum probe_type {
PROBE_DEFAULT_STRATEGY,
PROBE_PREFER_ASYNCHRONOUS,
PROBE_FORCE_SYNCHRONOUS,
};
 
struct device_driver {
const char *name;
const char *mod_name; /* used for built-in modules */
 
bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
enum probe_type probe_type;
};
 
struct device {
struct device *parent;
 
const char *init_name; /* initial name of the device */
struct device_driver *driver; /* which driver has allocated this
device */
void *platform_data; /* Platform specific data, device
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set/get_drvdata */
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
struct irq_domain *msi_domain;
#endif
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
#ifdef CONFIG_GENERIC_MSI_IRQ
struct list_head msi_list;
#endif
 
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
#endif
#ifdef CONFIG_DMA_CMA
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
};
 
extern __printf(2, 3)
int dev_set_name(struct device *dev, const char *name, ...);
 
#ifdef CONFIG_NUMA
static inline int dev_to_node(struct device *dev)
{
return dev->numa_node;
}
static inline void set_dev_node(struct device *dev, int node)
{
dev->numa_node = node;
}
#else
static inline int dev_to_node(struct device *dev)
{
return -1;
}
static inline void set_dev_node(struct device *dev, int node)
{
}
#endif
 
 
static inline void *dev_get_drvdata(const struct device *dev)
{
return dev->driver_data;
}
 
static inline void dev_set_drvdata(struct device *dev, void *data)
{
dev->driver_data = data;
}
 
 
 
#endif /* _DEVICE_H_ */
/drivers/include/linux/dma-buf.h
115,6 → 115,8
* @attachments: list of dma_buf_attachment that denotes all devices attached.
* @ops: dma_buf_ops associated with this buffer object.
* @exp_name: name of the exporter; useful for debugging.
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object.
* @resv: reservation object linked to this dma-buf
170,13 → 172,8
void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *dmabuf_attach);
 
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
size_t size, int flags, const char *,
struct reservation_object *);
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
 
#define dma_buf_export(priv, ops, size, flags, resv) \
dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
 
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
void dma_buf_put(struct dma_buf *dmabuf);
/drivers/include/linux/dma_remapping.h
20,7 → 20,15
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THROUGH 2
/* Extended context entry types */
#define CONTEXT_TT_PT_PASID 4
#define CONTEXT_TT_PT_PASID_DEV_IOTLB 5
#define CONTEXT_TT_MASK (7ULL << 2)
 
#define CONTEXT_DINVE (1ULL << 8)
#define CONTEXT_PRS (1ULL << 9)
#define CONTEXT_PASIDE (1ULL << 11)
 
struct intel_iommu;
struct dmar_domain;
struct root_entry;
/drivers/include/linux/dmapool.h
19,6 → 19,12
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle);
 
static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
return dma_pool_alloc(pool, mem_flags | __GFP_ZERO, handle);
}
 
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
 
/*
/drivers/include/linux/dmi.h
74,7 → 74,7
u8 type;
u8 length;
u16 handle;
};
} __packed;
 
struct dmi_device {
struct list_head list;
/drivers/include/linux/fb.h
1111,7 → 1111,9
struct fb_videomode *fbmode);
 
/* drivers/video/modedb.c */
#define VESA_MODEDB_SIZE 34
#define VESA_MODEDB_SIZE 43
#define DMT_SIZE 0x50
 
extern void fb_var_to_videomode(struct fb_videomode *mode,
const struct fb_var_screeninfo *var);
extern void fb_videomode_to_var(struct fb_var_screeninfo *var,
1162,9 → 1164,17
u32 flag;
};
 
struct dmt_videomode {
u32 dmt_id;
u32 std_2byte_code;
u32 cvt_3byte_code;
const struct fb_videomode *mode;
};
 
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
extern const struct fb_videomode cea_modes[64];
extern const struct dmt_videomode dmt_modes[];
 
struct fb_modelist {
struct list_head list;
1178,4 → 1188,16
const struct fb_videomode *default_mode,
unsigned int default_bpp);
 
/* Convenience logging macros */
#define fb_err(fb_info, fmt, ...) \
pr_err("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_notice(info, fmt, ...) \
pr_notice("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_warn(fb_info, fmt, ...) \
pr_warn("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_info(fb_info, fmt, ...) \
pr_info("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
#define fb_dbg(fb_info, fmt, ...) \
pr_debug("fb%d: " fmt, (fb_info)->node, ##__VA_ARGS__)
 
#endif /* _LINUX_FB_H */
/drivers/include/linux/fence.h
77,7 → 77,7
spinlock_t *lock;
unsigned context, seqno;
unsigned long flags;
// ktime_t timestamp;
ktime_t timestamp;
int status;
};
 
280,6 → 280,22
}
 
/**
* fence_is_later - return if f1 is chronologically later than f2
* @f1: [in] the first fence from the same context
* @f2: [in] the second fence from the same context
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not re-used across contexts.
*/
static inline bool fence_is_later(struct fence *f1, struct fence *f2)
{
if (WARN_ON(f1->context != f2->context))
return false;
 
return f1->seqno - f2->seqno < INT_MAX;
}
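 
/*
* Illustrative sketch (not part of this header): given two fences from the
* same context, fence_later() below picks the one that still needs waiting
* on, and fence_wait() (declared further down) blocks until it signals.
*
*	struct fence *last = fence_later(f1, f2);
*
*	if (last)
*		fence_wait(last, false);	// both already signaled when NULL
*/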
 
/**
* fence_later - return the chronologically later fence
* @f1: [in] the first fence from the same context
* @f2: [in] the second fence from the same context
298,15 → 314,16
* set if enable_signaling wasn't called, and enabling that here is
* overkill.
*/
if (f2->seqno - f1->seqno <= INT_MAX)
if (fence_is_later(f1, f2))
return fence_is_signaled(f1) ? NULL : f1;
else
return fence_is_signaled(f2) ? NULL : f2;
else
return fence_is_signaled(f1) ? NULL : f1;
}
 
signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout);
signed long fence_wait_any_timeout(struct fence **fences, uint32_t count,
bool intr, signed long timeout);
 
 
/**
* fence_wait - sleep until the fence gets signaled
* @fence: [in] the fence to wait on
/drivers/include/linux/firmware.h
1,10 → 1,11
#ifndef _LINUX_FIRMWARE_H
#define _LINUX_FIRMWARE_H
 
#include <linux/module.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/module.h>
 
#define FW_ACTION_NOHOTPLUG 0
#define FW_ACTION_HOTPLUG 1
/drivers/include/linux/gfp.h
13,7 → 13,7
#define ___GFP_HIGHMEM 0x02u
#define ___GFP_DMA32 0x04u
#define ___GFP_MOVABLE 0x08u
#define ___GFP_WAIT 0x10u
#define ___GFP_RECLAIMABLE 0x10u
#define ___GFP_HIGH 0x20u
#define ___GFP_IO 0x40u
#define ___GFP_FS 0x80u
28,18 → 28,18
#define ___GFP_NOMEMALLOC 0x10000u
#define ___GFP_HARDWALL 0x20000u
#define ___GFP_THISNODE 0x40000u
#define ___GFP_RECLAIMABLE 0x80000u
#define ___GFP_ATOMIC 0x80000u
#define ___GFP_NOACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_NO_KSWAPD 0x400000u
#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
#define ___GFP_KSWAPD_RECLAIM 0x2000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
/*
* GFP bitmasks..
* Physical address zone modifiers (see linux/mmzone.h - low four bits)
*
* Zone modifiers (see linux/mmzone.h - low three bits)
*
* Do not put any conditional on these. If necessary modify the definitions
* without the underscores and use them consistently. The definitions here may
* be used in bit comparisons.
48,110 → 48,219
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 
/*
* Action modifiers - doesn't change the zoning
* Page mobility and placement hints
*
* These flags provide hints about how mobile the page is. Pages with similar
* mobility are placed within the same pageblocks to minimise problems due
* to external fragmentation.
*
* __GFP_MOVABLE (also a zone modifier) indicates that the page can be
* moved by page migration during memory compaction or can be reclaimed.
*
* __GFP_RECLAIMABLE is used for slab allocations that specify
* SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
*
* __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
* these pages will be spread between local zones to avoid all the dirty
* pages being in one zone (fair zone allocation policy).
*
* __GFP_HARDWALL enforces the cpuset memory allocation policy.
*
* __GFP_THISNODE forces the allocation to be satisfied from the requested
* node with no fallbacks or placement policy enforcements.
*/
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
 
/*
* Watermark modifiers -- controls access to emergency reserves
*
* __GFP_HIGH indicates that the caller is high-priority and that granting
* the request is necessary before the system can make forward progress.
* For example, creating an IO context to clean pages.
*
* __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
* high priority. Users are typically interrupt handlers. This may be
* used in conjunction with __GFP_HIGH.
*
* __GFP_MEMALLOC allows access to all memory. This should only be used when
* the caller guarantees the allocation will allow more memory to be freed
* very shortly, e.g. process exiting or swapping. Users should either be
* the MM or co-ordinate closely with the VM (e.g. swap over NFS).
*
* __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
* This takes precedence over the __GFP_MEMALLOC flag if both are set.
*
* __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
*/
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT)
 
/*
* Reclaim modifiers
*
* __GFP_IO can start physical IO.
*
* __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
* allocator recursing into the filesystem which might already be holding
* locks.
*
* __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
* This flag can be cleared to avoid unnecessary delays when a fallback
* option is available.
*
* __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
* the low watermark is reached and have it reclaim pages until the high
* watermark is reached. A caller may wish to clear this flag when fallback
* options are available and the reclaim is likely to disrupt the system. The
* canonical example is THP allocation where a fallback is cheap but
* reclaim/compaction may cause indirect stalls.
*
* __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
* __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
* _might_ fail. This depends upon the particular VM implementation.
*
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
* cannot handle allocation failures. This modifier is deprecated and no new
* users should be added.
* cannot handle allocation failures. New users should be evaluated carefully
* (and the flag should be used only when there is no reasonable failure
* policy) but it is definitely preferable to use the flag rather than
* opencode endless loop around allocator.
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely.
*
* __GFP_MOVABLE: Flag that this page will be movable by the page migration
* mechanism or reclaimed
* __GFP_NORETRY: The VM implementation must not retry indefinitely and will
* return NULL when direct reclaim and memory compaction have failed to allow
* the allocation to succeed. The OOM killer is not called with the current
* implementation.
*/
#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */
#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */
#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */
#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */
#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves.
* This takes precedence over the
* __GFP_MEMALLOC flag if both are
* set
*/
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */
#define __GFP_IO ((__force gfp_t)___GFP_IO)
#define __GFP_FS ((__force gfp_t)___GFP_FS)
#define __GFP_DIRECT_RECLAIM ((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
#define __GFP_KSWAPD_RECLAIM ((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT)
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
 
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
 
/*
* This may seem redundant, but it's a way of annotating false positives vs.
* allocations that simply cannot be supported (e.g. page tables).
* Action modifiers
*
* __GFP_COLD indicates that the caller does not expect the page to be used in
* the near future. Where possible, a cache-cold page will be returned.
*
* __GFP_NOWARN suppresses allocation failure reports.
*
* __GFP_COMP requests compound page metadata.
*
* __GFP_ZERO returns a zeroed page on success.
*
* __GFP_NOTRACK avoids tracking with kmemcheck.
*
* __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
* distinguishing in the source between false positives and allocations that
* cannot be supported (e.g. page tables).
*
* __GFP_OTHER_NODE is for allocations that are on a remote node but that
* should not be accounted for as a remote allocation in vmstat. A
* typical user would be khugepaged collapsing a huge page on a remote
* node.
*/
#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
 
#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_NOIO (__GFP_WAIT)
#define GFP_NOFS (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
/*
* Useful GFP flag combinations that are commonly used. It is recommended
* that subsystems start with one of these combinations and then set/clear
* __GFP_FOO flags as necessary.
*
* GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
* watermark is applied to allow access to "atomic reserves"
*
* GFP_KERNEL is typical for kernel-internal allocations. The caller requires
* ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
*
* GFP_NOWAIT is for kernel allocations that should not stall for direct
* reclaim, start physical IO or use any filesystem callback.
*
* GFP_NOIO will use direct reclaim to discard clean pages or slab pages
* that do not require the starting of any physical IO.
*
* GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
*
* GFP_USER is for userspace allocations that also need to be directly
* accessible by the kernel or hardware. It is typically used by hardware
* for buffers that are mapped to userspace (e.g. graphics) that hardware
* still must DMA to. cpuset limits are enforced for these allocations.
*
* GFP_DMA exists for historical reasons and should be avoided where possible.
* The flag indicates that the caller requires that the lowest zone be
* used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
* it would require careful auditing as some users really require it and
* others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
* lowest zone as a type of emergency reserve.
*
* GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
* address.
*
* GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
* do not need to be directly accessible by the kernel but that cannot
* move once in use. An example may be a hardware allocation that maps
* data directly into userspace but has no addressing limitations.
*
* GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
* need direct access to but can use kmap() when access is required. They
* are expected to be movable via page reclaim or page migration. Typically,
* pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
*
* GFP_TRANSHUGE is used for THP allocations. They are compound allocations
* that will fail quickly if memory is not available and will not wake
* kswapd on failure.
*/
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM)
#define GFP_NOIO (__GFP_RECLAIM)
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
#define GFP_TEMPORARY (__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
__GFP_RECLAIMABLE)
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_DMA __GFP_DMA
#define GFP_DMA32 __GFP_DMA32
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_IOFS (__GFP_IO | __GFP_FS)
#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
__GFP_NO_KSWAPD)
#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
~__GFP_KSWAPD_RECLAIM)
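 
/*
* Illustrative sketch (not part of this header), assuming the usual
* kmalloc()/kfree() helpers are available in this environment:
*
*	void *buf = kmalloc(4096, GFP_KERNEL);		// may sleep and reclaim
*	void *irq_buf = kmalloc(64, GFP_ATOMIC);	// safe in interrupt context
*
*	kfree(irq_buf);
*	kfree(buf);
*/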
 
/*
* GFP_THISNODE does not perform any reclaim, you most likely want to
* use __GFP_THISNODE to allocate from a given node without fallback!
*/
#ifdef CONFIG_NUMA
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE ((__force gfp_t)0)
#endif
 
/* This mask makes up all the page movable related flags */
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3
 
/* Control page allocator reclaim behavior */
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
 
/* Control slab gfp mask during early boot */
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
return gfp_flags & __GFP_DIRECT_RECLAIM;
}
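 
/*
* Illustrative sketch (pool_lock is hypothetical): branch on
* gfpflags_allow_blocking() to choose between a sleeping and a
* non-sleeping locking path.
*
*	if (gfpflags_allow_blocking(gfp))
*		mutex_lock(&pool_lock);
*	else if (!mutex_trylock(&pool_lock))
*		return NULL;
*/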
 
/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
 
/* Do not use these with a slab allocator */
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
 
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
platforms, used as appropriate on others */
 
#define GFP_DMA __GFP_DMA
 
/* 4GB DMA on some platforms */
#define GFP_DMA32 __GFP_DMA32
 
 
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
/drivers/include/linux/gpio/consumer.h
0,0 → 1,455
#ifndef __LINUX_GPIO_CONSUMER_H
#define __LINUX_GPIO_CONSUMER_H
 
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
 
struct device;
 
/**
* Opaque descriptor for a GPIO. These are obtained using gpiod_get() and are
* preferable to the old integer-based handles.
*
* Contrary to integers, a pointer to a gpio_desc is guaranteed to be valid
* until the GPIO is released.
*/
struct gpio_desc;
 
/**
* Struct containing an array of descriptors that can be obtained using
* gpiod_get_array().
*/
struct gpio_descs {
unsigned int ndescs;
struct gpio_desc *desc[];
};
 
#define GPIOD_FLAGS_BIT_DIR_SET BIT(0)
#define GPIOD_FLAGS_BIT_DIR_OUT BIT(1)
#define GPIOD_FLAGS_BIT_DIR_VAL BIT(2)
 
/**
* Optional flags that can be passed to one of gpiod_* to configure direction
* and output value. These values cannot be OR'd.
*/
enum gpiod_flags {
GPIOD_ASIS = 0,
GPIOD_IN = GPIOD_FLAGS_BIT_DIR_SET,
GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
GPIOD_FLAGS_BIT_DIR_VAL,
};
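 
/*
* Illustrative usage sketch (not part of this header), assuming a device
* whose firmware description provides a "reset" GPIO:
*
*	struct gpio_desc *reset;
*
*	reset = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
*	if (IS_ERR(reset))
*		return PTR_ERR(reset);
*	gpiod_set_value(reset, 1);	// assert reset
*	gpiod_set_value(reset, 0);	// release reset
*	gpiod_put(reset);
*/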
 
#ifdef CONFIG_GPIOLIB
 
/* Return the number of GPIOs associated with a device / function */
int gpiod_count(struct device *dev, const char *con_id);
 
/* Acquire and dispose GPIOs */
struct gpio_desc *__must_check gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
const char *con_id,
unsigned int index,
enum gpiod_flags flags);
struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
void gpiod_put(struct gpio_desc *desc);
void gpiod_put_array(struct gpio_descs *descs);
 
struct gpio_desc *__must_check devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags);
struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_desc *__must_check
devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags);
struct gpio_descs *__must_check devm_gpiod_get_array(struct device *dev,
const char *con_id,
enum gpiod_flags flags);
struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags);
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs);
 
int gpiod_get_direction(struct gpio_desc *desc);
int gpiod_direction_input(struct gpio_desc *desc);
int gpiod_direction_output(struct gpio_desc *desc, int value);
int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
 
/* Value get/set from non-sleeping context */
int gpiod_get_value(const struct gpio_desc *desc);
void gpiod_set_value(struct gpio_desc *desc, int value);
void gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array, int *value_array);
int gpiod_get_raw_value(const struct gpio_desc *desc);
void gpiod_set_raw_value(struct gpio_desc *desc, int value);
void gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
 
/* Value get/set from sleeping context */
int gpiod_get_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
void gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array);
 
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
 
int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
 
int gpiod_to_irq(const struct gpio_desc *desc);
 
/* Convert between the old gpio_ and new gpiod_ interfaces */
struct gpio_desc *gpio_to_desc(unsigned gpio);
int desc_to_gpio(const struct gpio_desc *desc);
 
/* Child properties interface */
struct fwnode_handle;
 
struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname);
struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
const char *con_id,
struct fwnode_handle *child);
#else /* CONFIG_GPIOLIB */
 
static inline int gpiod_count(struct device *dev, const char *con_id)
{
return 0;
}
 
static inline struct gpio_desc *__must_check gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline struct gpio_desc *__must_check
gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_desc *__must_check
gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_desc *__must_check
gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_descs *__must_check
gpiod_get_array(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_descs *__must_check
gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline void gpiod_put(struct gpio_desc *desc)
{
might_sleep();
 
/* GPIO can never have been requested */
WARN_ON(1);
}
 
static inline void gpiod_put_array(struct gpio_descs *descs)
{
might_sleep();
 
/* GPIO can never have been requested */
WARN_ON(1);
}
 
static inline struct gpio_desc *__must_check
devm_gpiod_get(struct device *dev,
const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
static inline
struct gpio_desc *__must_check
devm_gpiod_get_index(struct device *dev,
const char *con_id,
unsigned int idx,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_desc *__must_check
devm_gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_desc *__must_check
devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_descs *__must_check
devm_gpiod_get_array(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
return ERR_PTR(-ENOSYS);
}
 
static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
{
might_sleep();
 
/* GPIO can never have been requested */
WARN_ON(1);
}
 
static inline void devm_gpiod_put_array(struct device *dev,
struct gpio_descs *descs)
{
might_sleep();
 
/* GPIO can never have been requested */
WARN_ON(1);
}
 
 
static inline int gpiod_get_direction(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -ENOSYS;
}
static inline int gpiod_direction_input(struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -ENOSYS;
}
static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -ENOSYS;
}
static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -ENOSYS;
}
 
 
static inline int gpiod_get_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
static inline void gpiod_set_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline void gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline void gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
 
static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
int value)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
static inline void gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
int *value_array)
{
/* GPIO can never have been requested */
WARN_ON(1);
}
 
static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -ENOSYS;
}
 
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
static inline int gpiod_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return 0;
}
 
static inline int gpiod_to_irq(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -EINVAL;
}
 
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
return ERR_PTR(-EINVAL);
}
 
static inline int desc_to_gpio(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
WARN_ON(1);
return -EINVAL;
}
 
/* Child properties interface */
struct fwnode_handle;
 
static inline struct gpio_desc *fwnode_get_named_gpiod(
struct fwnode_handle *fwnode, const char *propname)
{
return ERR_PTR(-ENOSYS);
}
 
static inline struct gpio_desc *devm_get_gpiod_from_child(
struct device *dev, const char *con_id, struct fwnode_handle *child)
{
return ERR_PTR(-ENOSYS);
}
 
#endif /* CONFIG_GPIOLIB */
 
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
 
int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc);
void gpiod_unexport(struct gpio_desc *desc);
 
#else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
 
static inline int gpiod_export(struct gpio_desc *desc,
bool direction_may_change)
{
return -ENOSYS;
}
 
static inline int gpiod_export_link(struct device *dev, const char *name,
struct gpio_desc *desc)
{
return -ENOSYS;
}
 
static inline void gpiod_unexport(struct gpio_desc *desc)
{
}
 
#endif /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
 
#endif
/drivers/include/linux/hdmi.h
25,6 → 25,7
#define __LINUX_HDMI_H_
 
#include <linux/types.h>
#include <linux/device.h>
 
enum hdmi_infoframe_type {
HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
52,6 → 53,11
HDMI_COLORSPACE_RGB,
HDMI_COLORSPACE_YUV422,
HDMI_COLORSPACE_YUV444,
HDMI_COLORSPACE_YUV420,
HDMI_COLORSPACE_RESERVED4,
HDMI_COLORSPACE_RESERVED5,
HDMI_COLORSPACE_RESERVED6,
HDMI_COLORSPACE_IDO_DEFINED,
};
 
enum hdmi_scan_mode {
58,6 → 64,7
HDMI_SCAN_MODE_NONE,
HDMI_SCAN_MODE_OVERSCAN,
HDMI_SCAN_MODE_UNDERSCAN,
HDMI_SCAN_MODE_RESERVED,
};
 
enum hdmi_colorimetry {
71,6 → 78,7
HDMI_PICTURE_ASPECT_NONE,
HDMI_PICTURE_ASPECT_4_3,
HDMI_PICTURE_ASPECT_16_9,
HDMI_PICTURE_ASPECT_RESERVED,
};
 
enum hdmi_active_aspect {
92,6 → 100,11
HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
 
/* The following EC values are only defined in CEA-861-F. */
HDMI_EXTENDED_COLORIMETRY_BT2020_CONST_LUM,
HDMI_EXTENDED_COLORIMETRY_BT2020,
HDMI_EXTENDED_COLORIMETRY_RESERVED,
};
 
enum hdmi_quantization_range {
98,6 → 111,7
HDMI_QUANTIZATION_RANGE_DEFAULT,
HDMI_QUANTIZATION_RANGE_LIMITED,
HDMI_QUANTIZATION_RANGE_FULL,
HDMI_QUANTIZATION_RANGE_RESERVED,
};
 
/* non-uniform picture scaling */
114,7 → 128,7
};
 
enum hdmi_content_type {
HDMI_CONTENT_TYPE_NONE,
HDMI_CONTENT_TYPE_GRAPHICS,
HDMI_CONTENT_TYPE_PHOTO,
HDMI_CONTENT_TYPE_CINEMA,
HDMI_CONTENT_TYPE_GAME,
194,6 → 208,7
HDMI_AUDIO_CODING_TYPE_MLP,
HDMI_AUDIO_CODING_TYPE_DST,
HDMI_AUDIO_CODING_TYPE_WMA_PRO,
HDMI_AUDIO_CODING_TYPE_CXT,
};
 
enum hdmi_audio_sample_size {
215,10 → 230,25
};
 
enum hdmi_audio_coding_type_ext {
HDMI_AUDIO_CODING_TYPE_EXT_STREAM,
/* Refer to Audio Coding Type (CT) field in Data Byte 1 */
HDMI_AUDIO_CODING_TYPE_EXT_CT,
 
/*
* The next three CXT values are defined in CEA-861-E only.
* They do not exist in older versions, and in CEA-861-F they are
* defined as 'Not in use'.
*/
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC,
HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND,
 
/* The following CXT values are only defined in CEA-861-F. */
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_V2,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC,
HDMI_AUDIO_CODING_TYPE_EXT_DRA,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_HE_AAC_SURROUND,
HDMI_AUDIO_CODING_TYPE_EXT_MPEG4_AAC_LC_SURROUND = 10,
};
 
struct hdmi_audio_infoframe {
299,5 → 329,8
 
ssize_t
hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
void hdmi_infoframe_log(const char *level, struct device *dev,
union hdmi_infoframe *frame);
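 
/*
* Illustrative sketch (buffer and dev are hypothetical): unpack a received
* InfoFrame and dump it to the log.
*
*	union hdmi_infoframe frame;
*
*	if (hdmi_infoframe_unpack(&frame, buffer) == 0)
*		hdmi_infoframe_log(KERN_DEBUG, dev, &frame);
*/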
 
#endif /* __LINUX_HDMI_H_ */
/drivers/include/linux/i2c.h
26,11 → 26,8
#ifndef _LINUX_I2C_H
#define _LINUX_I2C_H
 
#include <linux/types.h>
#ifdef __KERNEL__
#include <linux/module.h>
#include <linux/i2c-id.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h> /* for struct device */
#include <linux/sched.h> /* for completion */
#include <linux/mutex.h>
#include <linux/jiffies.h>
61,8 → 58,6
* @probe: Callback for device binding
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @suspend: Callback for device suspend
* @resume: Callback for device resume
* @alert: Alert callback, for example for the SMBus alert protocol
* @command: Callback for bus-wide signaling (optional)
* @driver: Device driver model driver
105,8 → 100,6
 
/* driver model interfaces that don't relate to enumeration */
void (*shutdown)(struct i2c_client *);
// int (*suspend)(struct i2c_client *, pm_message_t mesg);
int (*resume)(struct i2c_client *);
 
/* Alert callback, for example for the SMBus alert protocol.
* The format and meaning of the data value depends on the protocol.
166,10 → 159,10
extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
 
enum i2c_slave_event {
I2C_SLAVE_REQ_READ_START,
I2C_SLAVE_REQ_READ_END,
I2C_SLAVE_REQ_WRITE_START,
I2C_SLAVE_REQ_WRITE_END,
I2C_SLAVE_READ_REQUESTED,
I2C_SLAVE_WRITE_REQUESTED,
I2C_SLAVE_READ_PROCESSED,
I2C_SLAVE_WRITE_RECEIVED,
I2C_SLAVE_STOP,
};
/**
180,7 → 173,7
* @platform_data: stored in i2c_client.dev.platform_data
* @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
* @acpi_node: ACPI device node
* @fwnode: device node supplied by the platform firmware
* @irq: stored in i2c_client.irq
*
* I2C doesn't actually support hardware probing, although controllers and
306,8 → 299,8
((const unsigned short []){ addr, ## addrs, I2C_CLIENT_END })
 
 
#endif /* __KERNEL__ */
 
 
/**
* struct i2c_msg - an I2C transaction segment beginning with START
* @addr: Slave address, either seven or ten bits. When this is a ten
/drivers/include/linux/ioport.h
100,6 → 100,7
/* PnP I/O specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IO_16BIT_ADDR (1<<0)
#define IORESOURCE_IO_FIXED (1<<1)
#define IORESOURCE_IO_SPARSE (1<<2)
 
/* PCI ROM control bits (IORESOURCE_BITS) */
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
110,10 → 111,63
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
 
 
/* helpers to define resources */
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
{ \
.start = (_start), \
.end = (_start) + (_size) - 1, \
.name = (_name), \
.flags = (_flags), \
}
 
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
#define DEFINE_RES_IO(_start, _size) \
DEFINE_RES_IO_NAMED((_start), (_size), NULL)
 
#define DEFINE_RES_MEM_NAMED(_start, _size, _name) \
DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_MEM)
#define DEFINE_RES_MEM(_start, _size) \
DEFINE_RES_MEM_NAMED((_start), (_size), NULL)
 
#define DEFINE_RES_IRQ_NAMED(_irq, _name) \
DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
#define DEFINE_RES_IRQ(_irq) \
DEFINE_RES_IRQ_NAMED((_irq), NULL)
 
#define DEFINE_RES_DMA_NAMED(_dma, _name) \
DEFINE_RES_NAMED((_dma), 1, (_name), IORESOURCE_DMA)
#define DEFINE_RES_DMA(_dma) \
DEFINE_RES_DMA_NAMED((_dma), NULL)
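 
/*
* Illustrative sketch (address and IRQ number are made up): describing a
* device's MMIO window and interrupt with the helpers above.
*
*	static struct resource foo_resources[] = {
*		DEFINE_RES_MEM(0xfeb00000, 0x1000),
*		DEFINE_RES_IRQ(42),
*	};
*/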
 
/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
extern struct resource ioport_resource;
extern struct resource iomem_resource;
 
extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
extern int request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
void release_child_resources(struct resource *new);
extern void reserve_region_with_split(struct resource *root,
resource_size_t start, resource_size_t end,
const char *name);
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
resource_size_t max, resource_size_t align,
resource_size_t (*alignf)(void *,
const struct resource *,
resource_size_t,
resource_size_t),
void *alignf_data);
struct resource *lookup_resource(struct resource *root, resource_size_t start);
int adjust_resource(struct resource *res, resource_size_t start,
resource_size_t size);
resource_size_t resource_alignment(struct resource *res);
static inline resource_size_t resource_size(const struct resource *res)
{
return res->end - res->start + 1;
/drivers/include/linux/irqflags.h
85,7 → 85,7
* The local_irq_*() APIs are equal to the raw_local_irq*()
* if !TRACE_IRQFLAGS.
*/
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
#ifdef CONFIG_TRACE_IRQFLAGS
#define local_irq_enable() \
do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
#define local_irq_disable() \
107,23 → 107,7
raw_local_irq_restore(flags); \
} \
} while (0)
#define local_save_flags(flags) \
do { \
raw_local_save_flags(flags); \
} while (0)
 
#define irqs_disabled_flags(flags) \
({ \
raw_irqs_disabled_flags(flags); \
})
 
#define irqs_disabled() \
({ \
unsigned long _flags; \
raw_local_save_flags(_flags); \
raw_irqs_disabled_flags(_flags); \
})
 
#define safe_halt() \
do { \
trace_hardirqs_on(); \
131,7 → 115,7
} while (0)
 
 
#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
#else /* !CONFIG_TRACE_IRQFLAGS */
 
#define local_irq_enable() do { raw_local_irq_enable(); } while (0)
#define local_irq_disable() do { raw_local_irq_disable(); } while (0)
140,11 → 124,28
raw_local_irq_save(flags); \
} while (0)
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0)
#define local_save_flags(flags) do { raw_local_save_flags(flags); } while (0)
#define irqs_disabled() (raw_irqs_disabled())
#define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags))
#define safe_halt() do { raw_safe_halt(); } while (0)
 
#endif /* CONFIG_TRACE_IRQFLAGS */
 
#define local_save_flags(flags) raw_local_save_flags(flags)
 
/*
* Some architectures don't define arch_irqs_disabled(), so even if either
* definition would be fine we need to use different ones for the time being
* to avoid build issues.
*/
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
#define irqs_disabled() \
({ \
unsigned long _flags; \
raw_local_save_flags(_flags); \
raw_irqs_disabled_flags(_flags); \
})
#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */
#define irqs_disabled() raw_irqs_disabled()
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
 
#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
 
#endif
/drivers/include/linux/irqreturn.h
3,7 → 3,7
 
/**
* enum irqreturn
* @IRQ_NONE interrupt was not from this device
* @IRQ_NONE interrupt was not from this device or was not handled
* @IRQ_HANDLED interrupt was handled by this device
* @IRQ_WAKE_THREAD handler requests to wake the handler thread
*/
/drivers/include/linux/jiffies.h
292,11 → 292,145
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
}
 
extern unsigned long msecs_to_jiffies(const unsigned int m);
extern unsigned long usecs_to_jiffies(const unsigned int u);
extern unsigned long timespec_to_jiffies(const struct timespec *value);
extern void jiffies_to_timespec(const unsigned long jiffies,
struct timespec *value);
extern unsigned long __msecs_to_jiffies(const unsigned int m);
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
/*
* HZ is equal to or smaller than 1000, and 1000 is a nice round
* multiple of HZ, divide with the factor between them, but round
* upwards:
*/
static inline unsigned long _msecs_to_jiffies(const unsigned int m)
{
return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
}
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
/*
* HZ is larger than 1000, and HZ is a nice round multiple of 1000 -
* simply multiply with the factor between them.
*
* But first make sure the multiplication result cannot overflow:
*/
static inline unsigned long _msecs_to_jiffies(const unsigned int m)
{
if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
return m * (HZ / MSEC_PER_SEC);
}
#else
/*
* Generic case - multiply, round and divide. But first check that if
* we are doing a net multiplication, that we wouldn't overflow:
*/
static inline unsigned long _msecs_to_jiffies(const unsigned int m)
{
if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
 
return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32;
}
#endif
/**
* msecs_to_jiffies: - convert milliseconds to jiffies
* @m: time in milliseconds
*
* conversion is done as follows:
*
* - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
*
* - 'too large' values [that would result in larger than
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
*
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor and
* handling any 32-bit overflows.
* for the details see __msecs_to_jiffies()
*
* msecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
* code, __msecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
* the HZ range specific helpers _msecs_to_jiffies() are called both
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
*/
static inline unsigned long msecs_to_jiffies(const unsigned int m)
{
if (__builtin_constant_p(m)) {
if ((int)m < 0)
return MAX_JIFFY_OFFSET;
return _msecs_to_jiffies(m);
} else {
return __msecs_to_jiffies(m);
}
}
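 
/*
* Illustrative sketch: a constant argument is folded at compile time, a
* runtime value goes through __msecs_to_jiffies().
*
*	unsigned long t1 = msecs_to_jiffies(100);		// constant-folded
*	unsigned long t2 = msecs_to_jiffies(timeout_ms);	// runtime conversion
*/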
 
extern unsigned long __usecs_to_jiffies(const unsigned int u);
#if !(USEC_PER_SEC % HZ)
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
}
#else
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
{
return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
>> USEC_TO_HZ_SHR32;
}
#endif
 
/**
* usecs_to_jiffies: - convert microseconds to jiffies
* @u: time in microseconds
*
* conversion is done as follows:
*
* - 'too large' values [that would result in larger than
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
*
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor and
* handling any 32-bit overflows as for msecs_to_jiffies.
*
* usecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
* code, __usecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
* the HZ range specific helpers _usecs_to_jiffies() are called both
* directly here and from __usecs_to_jiffies() in the case where
* constant folding is not possible.
*/
static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
{
if (__builtin_constant_p(u)) {
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
return _usecs_to_jiffies(u);
} else {
return __usecs_to_jiffies(u);
}
}
 
extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
static inline unsigned long timespec_to_jiffies(const struct timespec *value)
{
struct timespec64 ts = timespec_to_timespec64(*value);
 
return timespec64_to_jiffies(&ts);
}
 
static inline void jiffies_to_timespec(const unsigned long jiffies,
struct timespec *value)
{
struct timespec64 ts;
 
jiffies_to_timespec64(jiffies, &ts);
*value = timespec64_to_timespec(ts);
}
 
extern unsigned long timeval_to_jiffies(const struct timeval *value);
extern void jiffies_to_timeval(const unsigned long jiffies,
struct timeval *value);
/drivers/include/linux/kernel.h
102,6 → 102,18
(((__x) - ((__d) / 2)) / (__d)); \
} \
)
/*
* Same as above but for u64 dividends. divisor must be a 32-bit
* number.
*/
#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \
{ \
typeof(divisor) __d = divisor; \
unsigned long long _tmp = (x) + (__d) / 2; \
do_div(_tmp, __d); \
_tmp; \
} \
)
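 
/*
* Illustrative examples: DIV_ROUND_CLOSEST_ULL(5ULL, 2) == 3 and
* DIV_ROUND_CLOSEST_ULL(1000ULL, 3) == 333.
*/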
 
/*
* Multiplies an integer by a fraction, while avoiding unnecessary
149,14 → 161,52
*/
#define lower_32_bits(n) ((u32)(n))
 
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif
 
/*
* abs() handles unsigned and signed longs, ints, shorts and chars. For all
* input types abs() returns a signed long.
* abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()
* for those.
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
void ___might_sleep(const char *file, int line, int preempt_offset);
void __might_sleep(const char *file, int line, int preempt_offset);
/**
* might_sleep - annotation for functions that can sleep
*
* this macro will print a stack trace if it is executed in an atomic
* context (spinlock, irq-handler, ...).
*
* This is a useful debugging aid for catching problems early rather than
* being bitten later when the calling function happens to sleep when it is
* not supposed to.
*/
#define abs(x) ({ \
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
static inline void ___might_sleep(const char *file, int line,
int preempt_offset) { }
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
# define sched_annotate_sleep() do { } while (0)
#endif
 
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
 
/**
* abs - return absolute value of an argument
* @x: the value. If it is an unsigned type, it is converted to a signed type first
* (s64, long or int depending on its size).
*
* Return: the absolute value of x. If x is 64-bit, the macro's return type is
* s64, otherwise it is signed long.
*/
#define abs(x) __builtin_choose_expr(sizeof(x) == sizeof(s64), ({ \
s64 __x = (x); \
(__x < 0) ? -__x : __x; \
}), ({ \
long ret; \
if (sizeof(x) == sizeof(long)) { \
long __x = (x); \
166,13 → 216,35
ret = (__x < 0) ? -__x : __x; \
} \
ret; \
})
}))
 
#define abs64(x) ({ \
s64 __x = (x); \
(__x < 0) ? -__x : __x; \
})
/**
* reciprocal_scale - "scale" a value into range [0, ep_ro)
* @val: value
* @ep_ro: right open interval endpoint
*
* Perform a "reciprocal multiplication" in order to "scale" a value into
* range [0, ep_ro), where the upper interval endpoint is right-open.
* This is useful, e.g. for accessing an index of an array containing
* ep_ro elements. Think of it as sort of modulus, only that
* the result isn't that of modulo. ;) Note that if the initial input is a
* small value, then the result will be 0.
*
* Return: a result based on val in interval [0, ep_ro).
*/
static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
return (u32)(((u64) val * ep_ro) >> 32);
}
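 
/*
* Illustrative sketch (hash and table are hypothetical): map a 32-bit hash
* onto a bucket index without a division or modulo.
*
*	bucket = reciprocal_scale(hash, ARRAY_SIZE(table));
*/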
 
#if defined(CONFIG_MMU) && \
(defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
#define might_fault() __might_fault(__FILE__, __LINE__)
void __might_fault(const char *file, int line);
#else
static inline void might_fault(void) { }
#endif
 
#define KERN_EMERG "<0>" /* system is unusable */
#define KERN_ALERT "<1>" /* action must be taken immediately */
#define KERN_CRIT "<2>" /* critical conditions */
181,6 → 253,15
#define KERN_NOTICE "<5>" /* normal but significant condition */
#define KERN_INFO "<6>" /* informational */
#define KERN_DEBUG "<7>" /* debug-level messages */
extern unsigned long simple_strtoul(const char *,char **,unsigned int);
extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);
 
extern int num_to_str(char *buf, int size, unsigned long long num);
 
/* lib/printf utilities */
 
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
extern __printf(3, 4)
193,7 → 274,16
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
extern __printf(2, 3)
char *kasprintf(gfp_t gfp, const char *fmt, ...);
extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern __printf(2, 0)
char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
extern __printf(2, 0)
const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
 
extern __scanf(2, 3)
int sscanf(const char *, const char *, ...);
extern __scanf(2, 0)
int vsscanf(const char *, const char *, va_list);
extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
enum lockdep_ok {
LOCKDEP_STILL_OK,
LOCKDEP_NOW_UNRELIABLE
229,6 → 319,7
#define TAINT_OOT_MODULE 12
#define TAINT_UNSIGNED_MODULE 13
#define TAINT_SOFTLOCKUP 14
#define TAINT_LIVEPATCH 15
 
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
277,12 → 368,6
*
* Most likely, you want to use tracing_on/tracing_off.
*/
#ifdef CONFIG_RING_BUFFER
/* trace_off_permanent stops recording with no way to bring it back */
void tracing_off_permanent(void);
#else
static inline void tracing_off_permanent(void) { }
#endif
 
enum ftrace_dump_mode {
DUMP_NONE,
426,10 → 511,10
__ftrace_vprintk(_THIS_IP_, fmt, vargs); \
} while (0)
 
extern int
extern __printf(2, 0) int
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
 
extern int
extern __printf(2, 0) int
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
 
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
449,7 → 534,7
{
return 0;
}
static inline int
static __printf(1, 0) inline int
ftrace_vprintk(const char *fmt, va_list ap)
{
return 0;
566,10 → 651,12
#define VERIFY_OCTAL_PERMISSIONS(perms) \
(BUILD_BUG_ON_ZERO((perms) < 0) + \
BUILD_BUG_ON_ZERO((perms) > 0777) + \
/* User perms >= group perms >= other perms */ \
BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \
BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \
/* Other writable? Generally considered a bad idea. */ \
/* USER_READABLE >= GROUP_READABLE >= OTHER_READABLE */ \
BUILD_BUG_ON_ZERO((((perms) >> 6) & 4) < (((perms) >> 3) & 4)) + \
BUILD_BUG_ON_ZERO((((perms) >> 3) & 4) < ((perms) & 4)) + \
/* USER_WRITABLE >= GROUP_WRITABLE */ \
BUILD_BUG_ON_ZERO((((perms) >> 6) & 2) < (((perms) >> 3) & 2)) + \
/* OTHER_WRITABLE? Generally considered a bad idea. */ \
BUILD_BUG_ON_ZERO((perms) & 2) + \
(perms))
 
591,23 → 678,7
struct vm_area_struct {};
struct address_space {};
 
struct device
{
struct device *parent;
void *driver_data;
};
 
static inline void dev_set_drvdata(struct device *dev, void *data)
{
dev->driver_data = data;
}
 
static inline void *dev_get_drvdata(struct device *dev)
{
return dev->driver_data;
}
 
 
#define in_dbg_master() (0)
 
#define HZ 100
728,10 → 799,7
#define IS_ENABLED(a) 0
 
 
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
 
 
 
#define cpufreq_quick_get_max(x) GetCpuFreq()
 
extern unsigned int tsc_khz;
771,8 → 839,6
return 0;
}
 
struct seq_file;
 
void *kmap(struct page *page);
void *kmap_atomic(struct page *page);
void kunmap(struct page *page);
787,5 → 853,4
 
#define CONFIG_PAGE_OFFSET 0
 
 
#endif
/drivers/include/linux/kobject.h
80,7 → 80,8
 
extern __printf(2, 3)
int kobject_set_name(struct kobject *kobj, const char *name, ...);
extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
extern __printf(2, 0)
int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
va_list vargs);
 
static inline const char *kobject_name(const struct kobject *kobj)
121,6 → 122,7
};
 
struct kobj_uevent_env {
char *argv[3];
char *envp[UEVENT_NUM_ENVP];
int envp_idx;
char buf[UEVENT_BUFFER_SIZE];
/drivers/include/linux/kref.h
25,10 → 25,147
atomic_t refcount;
};
 
void kref_init(struct kref *kref);
void kref_get(struct kref *kref);
int kref_put(struct kref *kref, void (*release) (struct kref *kref));
int kref_sub(struct kref *kref, unsigned int count,
void (*release) (struct kref *kref));
/**
* kref_init - initialize object.
* @kref: object in question.
*/
static inline void kref_init(struct kref *kref)
{
atomic_set(&kref->refcount, 1);
}
 
/**
* kref_get - increment refcount for object.
* @kref: object.
*/
static inline void kref_get(struct kref *kref)
{
/* If refcount was 0 before incrementing then we have a race
* condition where this kref is being freed by some other thread right now.
* In this case one should use kref_get_unless_zero()
*/
WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
}
 
/**
* kref_sub - subtract a number of refcounts for object.
* @kref: object.
* @count: Number of refcounts to subtract.
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
* in as this function. If the caller does pass kfree to this
* function, you will be publicly mocked mercilessly by the kref
* maintainer, and anyone else who happens to notice it. You have
* been warned.
*
* Subtract @count from the refcount, and if 0, call release().
* Return 1 if the object was removed, otherwise return 0. Beware, if this
* function returns 0, you still can not count on the kref from remaining in
* memory. Only use the return value if you want to see if the kref is now
* gone, not present.
*/
static inline int kref_sub(struct kref *kref, unsigned int count,
void (*release)(struct kref *kref))
{
WARN_ON(release == NULL);
 
if (atomic_sub_and_test((int) count, &kref->refcount)) {
release(kref);
return 1;
}
return 0;
}
 
/**
* kref_put - decrement refcount for object.
* @kref: object.
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
* in as this function. If the caller does pass kfree to this
* function, you will be publicly mocked mercilessly by the kref
* maintainer, and anyone else who happens to notice it. You have
* been warned.
*
* Decrement the refcount, and if 0, call release().
* Return 1 if the object was removed, otherwise return 0. Beware, if this
* function returns 0, you still cannot count on the kref remaining in
* memory. Only use the return value if you want to see if the kref is now
* gone, not present.
*/
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
return kref_sub(kref, 1, release);
}
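 
/*
* Illustrative sketch (struct foo is hypothetical): embed a kref in an
* object and release the object through kref_put().
*
*	struct foo {
*		struct kref ref;
*	};
*
*	static void foo_release(struct kref *kref)
*	{
*		kfree(container_of(kref, struct foo, ref));
*	}
*
*	// kref_init(&foo->ref), kref_get(&foo->ref), kref_put(&foo->ref, foo_release)
*/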
 
/**
* kref_put_spinlock_irqsave - decrement refcount for object.
* @kref: object.
* @release: pointer to the function that will clean up the object when the
* last reference to the object is released.
* This pointer is required, and it is not acceptable to pass kfree
* in as this function.
* @lock: lock to take in release case
*
* Behaves identically to kref_put() with one exception. If the reference count
* drops to zero, the lock will be taken atomically wrt dropping the reference
* count. The release function has to call spin_unlock() without _irqrestore.
*/
static inline int kref_put_spinlock_irqsave(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
{
unsigned long flags;
 
WARN_ON(release == NULL);
if (atomic_add_unless(&kref->refcount, -1, 1))
return 0;
spin_lock_irqsave(lock, flags);
if (atomic_dec_and_test(&kref->refcount)) {
release(kref);
local_irq_restore(flags);
return 1;
}
spin_unlock_irqrestore(lock, flags);
return 0;
}
 
static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
{
WARN_ON(release == NULL);
if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
mutex_lock(lock);
if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
mutex_unlock(lock);
return 0;
}
release(kref);
return 1;
}
return 0;
}
 
/**
* kref_get_unless_zero - Increment refcount for object unless it is zero.
* @kref: object.
*
* Return non-zero if the increment succeeded. Otherwise return 0.
*
* This function is intended to simplify locking around refcounting for
* objects that can be looked up from a lookup structure, and which are
* removed from that lookup structure in the object destructor.
* Operations on such objects require at least a read lock around
* lookup + kref_get, and a write lock around kref_put + remove from lookup
* structure. Furthermore, RCU implementations become extremely tricky.
* With a lookup followed by a kref_get_unless_zero *with return value check*
* locking in the kref_put path can be deferred to the actual removal from
* the lookup structure and RCU lookups become trivial.
*/
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
return atomic_add_unless(&kref->refcount, 1, 0);
}
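 
/*
* Illustrative lookup sketch (table_lock and table_find() are hypothetical):
* only keep the looked-up object if its refcount is still non-zero.
*
*	spin_lock(&table_lock);
*	obj = table_find(id);
*	if (obj && !kref_get_unless_zero(&obj->ref))
*		obj = NULL;	// destructor is already running
*	spin_unlock(&table_lock);
*/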
#endif /* _KREF_H_ */
/drivers/include/linux/ktime.h
0,0 → 1,304
/*
* include/linux/ktime.h
*
* ktime_t - nanosecond-resolution time format.
*
* Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
*
* data type definitions, declarations, prototypes and macros.
*
* Started by: Thomas Gleixner and Ingo Molnar
*
* Credits:
*
* Roman Zippel provided the ideas and primary code snippets of
* the ktime_t union and further simplifications of the original
* code.
*
* For licencing details see kernel-base/COPYING
*/
#ifndef _LINUX_KTIME_H
#define _LINUX_KTIME_H
 
#include <linux/time.h>
#include <linux/jiffies.h>
 
/*
* ktime_t:
*
* A single 64-bit variable is used to store the hrtimers
* internal representation of time values in scalar nanoseconds. The
* design plays out best on 64-bit CPUs, where most conversions are
* NOPs and most arithmetic ktime_t operations are plain arithmetic
* operations.
*
*/
union ktime {
s64 tv64;
};
 
typedef union ktime ktime_t; /* Kill this */
 
/**
* ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
* @secs: seconds to set
* @nsecs: nanoseconds to set
*
* Return: The ktime_t representation of the value.
*/
static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
{
if (unlikely(secs >= KTIME_SEC_MAX))
return (ktime_t){ .tv64 = KTIME_MAX };
 
return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs };
}
 
/* Subtract two ktime_t variables. rem = lhs - rhs: */
#define ktime_sub(lhs, rhs) \
({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; })
 
/* Add two ktime_t variables. res = lhs + rhs: */
#define ktime_add(lhs, rhs) \
({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
 
/*
* Add a ktime_t variable and a scalar nanosecond value.
* res = kt + nsval:
*/
#define ktime_add_ns(kt, nsval) \
({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; })
 
/*
* Subtract a scalar nanosecond from a ktime_t variable
* res = kt - nsval:
*/
#define ktime_sub_ns(kt, nsval) \
({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; })
 
/* convert a timespec to ktime_t format: */
static inline ktime_t timespec_to_ktime(struct timespec ts)
{
return ktime_set(ts.tv_sec, ts.tv_nsec);
}
 
/* convert a timespec64 to ktime_t format: */
static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
{
return ktime_set(ts.tv_sec, ts.tv_nsec);
}
 
/* convert a timeval to ktime_t format: */
static inline ktime_t timeval_to_ktime(struct timeval tv)
{
return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}
 
/* Map the ktime_t to timespec conversion to ns_to_timespec function */
#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64)
 
/* Map the ktime_t to timespec64 conversion to ns_to_timespec64 function */
#define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64)
 
/* Map the ktime_t to timeval conversion to ns_to_timeval function */
#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64)
 
/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
#define ktime_to_ns(kt) ((kt).tv64)
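/*
 * Example (sketch): since ktime_t is plain scalar nanoseconds, the
 * helpers above reduce to ordinary 64-bit arithmetic:
 *
 *	ktime_t t = ktime_set(1, 500000000);	// 1.5 s
 *	t = ktime_add_ns(t, 250000000);		// 1.75 s
 *	s64 ns = ktime_to_ns(t);		// 1750000000
 */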
 
 
/**
* ktime_equal - Compares two ktime_t variables to see if they are equal
* @cmp1: comparable1
* @cmp2: comparable2
*
* Compare two ktime_t variables.
*
* Return: 1 if equal.
*/
static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
{
return cmp1.tv64 == cmp2.tv64;
}
 
/**
* ktime_compare - Compares two ktime_t variables for less, greater or equal
* @cmp1: comparable1
* @cmp2: comparable2
*
* Return: ...
* cmp1 < cmp2: return <0
* cmp1 == cmp2: return 0
* cmp1 > cmp2: return >0
*/
static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
{
if (cmp1.tv64 < cmp2.tv64)
return -1;
if (cmp1.tv64 > cmp2.tv64)
return 1;
return 0;
}
 
/**
* ktime_after - Compare if a ktime_t value is bigger than another one.
* @cmp1: comparable1
* @cmp2: comparable2
*
* Return: true if cmp1 happened after cmp2.
*/
static inline bool ktime_after(const ktime_t cmp1, const ktime_t cmp2)
{
return ktime_compare(cmp1, cmp2) > 0;
}
 
/**
* ktime_before - Compare if a ktime_t value is smaller than another one.
* @cmp1: comparable1
* @cmp2: comparable2
*
* Return: true if cmp1 happened before cmp2.
*/
static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
{
return ktime_compare(cmp1, cmp2) < 0;
}
 
#if BITS_PER_LONG < 64
extern s64 __ktime_divns(const ktime_t kt, s64 div);
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
/*
* Negative divisors could cause an infinite loop,
* so bug out here.
*/
BUG_ON(div < 0);
if (__builtin_constant_p(div) && !(div >> 32)) {
s64 ns = kt.tv64;
u64 tmp = ns < 0 ? -ns : ns;
 
do_div(tmp, div);
return ns < 0 ? -tmp : tmp;
} else {
return __ktime_divns(kt, div);
}
}
#else /* BITS_PER_LONG < 64 */
static inline s64 ktime_divns(const ktime_t kt, s64 div)
{
/*
* The 32-bit implementation cannot handle negative divisors,
* so catch them on 64-bit as well.
*/
WARN_ON(div < 0);
return kt.tv64 / div;
}
#endif
 
static inline s64 ktime_to_us(const ktime_t kt)
{
return ktime_divns(kt, NSEC_PER_USEC);
}
 
static inline s64 ktime_to_ms(const ktime_t kt)
{
return ktime_divns(kt, NSEC_PER_MSEC);
}
 
static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
{
return ktime_to_us(ktime_sub(later, earlier));
}
 
static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
{
return ktime_to_ms(ktime_sub(later, earlier));
}
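/*
 * Example (sketch): measuring an elapsed interval with the delta
 * helpers; ktime_get() is declared further down in this header and
 * do_work() is hypothetical:
 *
 *	ktime_t start = ktime_get();
 *	do_work();
 *	pr_info("took %lld us\n", ktime_us_delta(ktime_get(), start));
 */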
 
static inline ktime_t ktime_add_us(const ktime_t kt, const u64 usec)
{
return ktime_add_ns(kt, usec * NSEC_PER_USEC);
}
 
static inline ktime_t ktime_add_ms(const ktime_t kt, const u64 msec)
{
return ktime_add_ns(kt, msec * NSEC_PER_MSEC);
}
 
static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
{
return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
}
 
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
 
/**
* ktime_to_timespec_cond - convert a ktime_t variable to timespec
* format only if the variable contains data
* @kt: the ktime_t variable to convert
* @ts: the timespec variable to store the result in
*
* Return: %true if there was a successful conversion, %false if kt was 0.
*/
static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
struct timespec *ts)
{
if (kt.tv64) {
*ts = ktime_to_timespec(kt);
return true;
} else {
return false;
}
}
 
/**
* ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
* format only if the variable contains data
* @kt: the ktime_t variable to convert
* @ts: the timespec variable to store the result in
*
* Return: %true if there was a successful conversion, %false if kt was 0.
*/
static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
struct timespec64 *ts)
{
if (kt.tv64) {
*ts = ktime_to_timespec64(kt);
return true;
} else {
return false;
}
}
 
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
* idea of the (in)accuracy of timers. Timer values are rounded up to
* this resolution value.
*/
#define LOW_RES_NSEC TICK_NSEC
#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC }
 
ktime_t ktime_get(void);
 
static inline ktime_t ns_to_ktime(u64 ns)
{
static const ktime_t ktime_zero = { .tv64 = 0 };
 
return ktime_add_ns(ktime_zero, ns);
}
 
static inline ktime_t ms_to_ktime(u64 ms)
{
static const ktime_t ktime_zero = { .tv64 = 0 };
 
return ktime_add_ms(ktime_zero, ms);
}
 
static inline u64 ktime_get_raw_ns(void)
{
return ktime_get().tv64;
}
 
#endif
/drivers/include/linux/list.h
672,6 → 672,11
n->pprev = &n->next;
}
 
static inline bool hlist_fake(struct hlist_node *h)
{
return h->pprev == &h->next;
}
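/*
 * Sketch: a "fake" node is one whose pprev points back at its own
 * next pointer, exactly what the helper above tests for. Such a node
 * appears hashed without being on any real list:
 *
 *	struct hlist_node n;
 *	n.pprev = &n.next;	// self-referential, mirroring the assignment above
 *	// hlist_fake(&n) now returns true
 */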
 
/*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
/drivers/include/linux/lockdep.h
130,8 → 130,8
};
 
struct lock_class_stats {
unsigned long contention_point[4];
unsigned long contending_point[4];
unsigned long contention_point[LOCKSTAT_POINTS];
unsigned long contending_point[LOCKSTAT_POINTS];
struct lock_time read_waittime;
struct lock_time write_waittime;
struct lock_time read_holdtime;
255,6 → 255,7
unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
unsigned int references:12; /* 32 bits */
unsigned int pin_count;
};
 
/*
354,6 → 355,9
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
 
extern void lock_pin_lock(struct lockdep_map *lock);
extern void lock_unpin_lock(struct lockdep_map *lock);
 
# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
 
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
368,6 → 372,9
 
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
 
#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
#define lockdep_unpin_lock(l) lock_unpin_lock(&(l)->dep_map)
 
#else /* !CONFIG_LOCKDEP */
 
static inline void lockdep_off(void)
420,6 → 427,9
 
#define lockdep_recursing(tsk) (0)
 
#define lockdep_pin_lock(l) do { (void)(l); } while (0)
#define lockdep_unpin_lock(l) do { (void)(l); } while (0)
 
#endif /* !LOCKDEP */
 
#ifdef CONFIG_LOCK_STAT
531,8 → 541,13
# define might_lock_read(lock) do { } while (0)
#endif
 
#ifdef CONFIG_PROVE_RCU
#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
static inline void
lockdep_rcu_suspicious(const char *file, const int line, const char *s)
{
}
#endif
 
#endif /* __LINUX_LOCKDEP_H */
/drivers/include/linux/math64.h
142,6 → 142,13
}
#endif /* mul_u64_u32_shr */
 
#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */
 
#else
 
#ifndef mul_u64_u32_shr
161,6 → 168,79
}
#endif /* mul_u64_u32_shr */
 
#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
union {
u64 ll;
struct {
#ifdef __BIG_ENDIAN
u32 high, low;
#else
u32 low, high;
#endif
} l;
} rl, rm, rn, rh, a0, b0;
u64 c;
 
a0.ll = a;
b0.ll = b;
 
rl.ll = (u64)a0.l.low * b0.l.low;
rm.ll = (u64)a0.l.low * b0.l.high;
rn.ll = (u64)a0.l.high * b0.l.low;
rh.ll = (u64)a0.l.high * b0.l.high;
 
/*
* Each of these lines computes a 64-bit intermediate result into "c",
* starting at bits 32-95. The low 32-bits go into the result of the
* multiplication, the high 32-bits are carried into the next step.
*/
rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
rh.l.high = (c >> 32) + rh.l.high;
 
/*
* The 128-bit result of the multiplication is in rl.ll and rh.ll,
* shift it right and throw away the high part of the result.
*/
if (shift == 0)
return rl.ll;
if (shift < 64)
return (rl.ll >> shift) | (rh.ll << (64 - shift));
return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */
 
#endif
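/*
 * Example (sketch): mul_u64_u64_shr() yields (a * b) >> shift using a
 * full 128-bit intermediate product, which suits fixed-point scaling:
 *
 *	u64 ratio = (1ULL << 32) + (1ULL << 31);	// 1.5 in 32.32 fixed point
 *	u64 out = mul_u64_u64_shr(1000, ratio, 32);	// out == 1500
 */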
 
#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
union {
u64 ll;
struct {
#ifdef __BIG_ENDIAN
u32 high, low;
#else
u32 low, high;
#endif
} l;
} u, rl, rh;
 
u.ll = a;
rl.ll = (u64)u.l.low * mul;
rh.ll = (u64)u.l.high * mul + rl.l.high;
 
/* Bits 32-63 of the result will be in rh.l.low. */
rl.l.high = do_div(rh.ll, divisor);
 
/* Bits 0-31 of the result will be in rl.l.low. */
do_div(rl.ll, divisor);
 
rl.l.high = rh.l.low;
return rl.ll;
}
#endif /* mul_u64_u32_div */
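/*
 * Example (sketch): mul_u64_u32_div() computes a * mul / divisor
 * without overflowing the intermediate product, e.g. converting a
 * nanosecond count to clock cycles for a hypothetical rate_hz:
 *
 *	u64 cycles = mul_u64_u32_div(time_ns, rate_hz, NSEC_PER_SEC);
 */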
 
#endif /* _LINUX_MATH64_H */
/drivers/include/linux/mod_devicetable.h
53,9 → 53,9
 
/**
* struct usb_device_id - identifies USB devices for probing and hotplugging
* @match_flags: Bit mask controlling of the other fields are used to match
* against new devices. Any field except for driver_info may be used,
* although some only make sense in conjunction with other fields.
* @match_flags: Bit mask controlling which of the other fields are used to
* match against new devices. Any field except for driver_info may be
* used, although some only make sense in conjunction with other fields.
* This is usually set by a USB_DEVICE_*() macro, which sets all
* other fields in this structure except for driver_info.
* @idVendor: USB vendor ID for a device; numbers are assigned
189,6 → 189,8
struct acpi_device_id {
__u8 id[ACPI_ID_LEN];
kernel_ulong_t driver_data;
__u32 cls;
__u32 cls_msk;
};
 
#define PNP_ID_LEN 8
217,11 → 219,18
__u8 proto;
};
 
struct hda_device_id {
__u32 vendor_id;
__u32 rev_id;
__u8 api_version;
const char *name;
unsigned long driver_data;
};
 
/*
* Struct used for matching a device
*/
struct of_device_id
{
struct of_device_id {
char name[32];
char type[32];
char compatible[128];
365,8 → 374,6
} __attribute__((packed, aligned(2)));
#define SSB_DEVICE(_vendor, _coreid, _revision) \
{ .vendor = _vendor, .coreid = _coreid, .revision = _revision, }
#define SSB_DEVTABLE_END \
{ 0, },
 
#define SSB_ANY_VENDOR 0xFFFF
#define SSB_ANY_ID 0xFFFF
381,8 → 388,6
} __attribute__((packed,aligned(2)));
#define BCMA_CORE(_manuf, _id, _rev, _class) \
{ .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, }
#define BCMA_CORETABLE_END \
{ 0, },
 
#define BCMA_ANY_MANUF 0xFFFF
#define BCMA_ANY_ID 0xFFFF
551,6 → 556,14
void *data;
};
 
/**
* struct mips_cdmm_device_id - identifies devices in MIPS CDMM bus
* @type: Device type identifier.
*/
struct mips_cdmm_device_id {
__u8 type;
};
 
/*
* Match x86 CPUs for CPU specific drivers.
* See documentation of "x86_match_cpu" for details.
596,9 → 609,21
 
#define MEI_CL_MODULE_PREFIX "mei:"
#define MEI_CL_NAME_SIZE 32
#define MEI_CL_VERSION_ANY 0xff
 
/**
* struct mei_cl_device_id - MEI client device identifier
* @name: helper name
* @uuid: client uuid
* @version: client protocol version
* @driver_info: information used by the driver.
*
* identifies mei client device by uuid and name
*/
struct mei_cl_device_id {
char name[MEI_CL_NAME_SIZE];
uuid_le uuid;
__u8 version;
kernel_ulong_t driver_info;
};
 
626,4 → 651,10
kernel_ulong_t driver_data;
};
 
struct ulpi_device_id {
__u16 vendor;
__u16 product;
kernel_ulong_t driver_data;
};
 
#endif /* LINUX_MOD_DEVICETABLE_H */
/drivers/include/linux/module.h
9,9 → 9,8
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/cache.h>
#include <linux/compiler.h>
 
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/printk.h>
/drivers/include/linux/moduleparam.h
2,9 → 2,69
#define _LINUX_MODULE_PARAMS_H
/* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */
#include <linux/kernel.h>
/**
* module_param - typesafe helper for a module/cmdline parameter
* @value: the variable to alter, and exposed parameter name.
* @type: the type of the parameter
* @perm: visibility in sysfs.
*
* @value becomes the module parameter, or (prefixed by KBUILD_MODNAME and a
* ".") the kernel commandline parameter. Note that - is changed to _, so
* the user can use "foo-bar=1" even for variable "foo_bar".
*
* @perm is 0 if the variable is not to appear in sysfs, or 0444
* for world-readable, 0644 for root-writable, etc. Note that if it
* is writable, you may need to use kernel_param_lock() around
* accesses (esp. charp, which can be kfreed when it changes).
*
* The @type is simply pasted to refer to a param_ops_##type and a
* param_check_##type: for convenience many standard types are provided but
* you can create your own by defining those variables.
*
* Standard types are:
* byte, short, ushort, int, uint, long, ulong
* charp: a character pointer
* bool: a bool, values 0/1, y/n, Y/N.
* invbool: the above, only sense-reversed (N = true).
*/
#define module_param(name, type, perm) \
module_param_named(name, name, type, perm)
 
#define MODULE_PARM_DESC(_parm, desc)
/**
* module_param_unsafe - same as module_param but taints kernel
*/
#define module_param_unsafe(name, type, perm) \
module_param_named_unsafe(name, name, type, perm)
 
/**
* module_param_named - typesafe helper for a renamed module/cmdline parameter
* @name: a valid C identifier which is the parameter name.
* @value: the actual lvalue to alter.
* @type: the type of the parameter
* @perm: visibility in sysfs.
*
* Usually it's a good idea to have variable names and user-exposed names the
* same, but that's harder if the variable must be non-static or is inside a
* structure. This allows exposure under a different name.
*/
#define module_param_named(name, value, type, perm)
/**
* module_param_named_unsafe - same as module_param_named but taints kernel
*/
 
#define module_param_named_unsafe(name, value, type, perm)
 
#define MODULE_PARM_DESC(_parm, desc)
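/*
 * Example (sketch): a typical module parameter declaration with the
 * macros above; "debug" is a hypothetical module variable:
 *
 *	static int debug;
 *	module_param(debug, int, 0644);
 *	MODULE_PARM_DESC(debug, "Enable verbose logging (0/1)");
 */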
 
#ifdef CONFIG_SYSFS
extern void kernel_param_lock(struct module *mod);
extern void kernel_param_unlock(struct module *mod);
#else
static inline void kernel_param_lock(struct module *mod)
{
}
static inline void kernel_param_unlock(struct module *mod)
{
}
#endif
#endif
/drivers/include/linux/pci.h
25,7 → 25,17
#include <linux/pci_regs.h> /* The pci register defines */
#include <linux/ioport.h>
 
#include <linux/device.h>
 
#if 0
struct device
{
struct device *parent;
void *driver_data;
};
 
#endif
 
#define PCI_CFG_SPACE_SIZE 256
#define PCI_CFG_SPACE_EXP_SIZE 4096
 
204,6 → 214,10
*
* 7:3 = slot
* 2:0 = function
*
* PCI_DEVFN(), PCI_SLOT(), and PCI_FUNC() are defined in uapi/linux/pci.h.
* In the interest of not exposing interfaces to user-space unnecessarily,
* the following kernel-only defines are being added here.
*/
#define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
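/*
 * Worked example: slot 3, function 1 pack into devfn 0x19, since
 * PCI_DEVFN(3, 1) == (3 << 3) | 1 == 0x19; conversely
 * PCI_SLOT(0x19) == 3 and PCI_FUNC(0x19) == 1.
 */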
427,11 → 441,12
D3cold, not set for devices
powered on/off by the
corresponding bridge */
unsigned int ignore_hotplug:1; /* Ignore hotplug events */
unsigned int d3_delay; /* D3->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
 
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */
struct pcie_link_state *link_state; /* ASPM link state */
#endif
 
pci_channel_state_t error_state; /* current connectivity state */
446,13 → 461,15
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
 
bool match_driver; /* Skip attaching driver */
/* These fields are used by common fixups */
unsigned int transparent:1; /* Transparent PCI bridge */
unsigned int transparent:1; /* Subtractive decode PCI bridge */
unsigned int multifunction:1;/* Part of multi-function device */
/* keep track of device state */
unsigned int is_added:1;
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
unsigned int block_cfg_access:1; /* config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
460,8 → 477,6
unsigned int msix_enabled:1;
unsigned int ari_enabled:1; /* ARI forwarding */
unsigned int is_managed:1;
unsigned int is_pcie:1; /* Obsolete. Will be removed.
Use pci_is_pcie() instead */
unsigned int needs_freset:1; /* Dev requires fundamental reset */
unsigned int state_saved:1;
unsigned int is_physfn:1;
/drivers/include/linux/percpu-defs.h
488,10 → 488,8
#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1)
 
/*
* Operations with implied preemption protection. These operations can be
* used without worrying about preemption. Note that interrupts may still
* occur while an operation is in progress and if the interrupt modifies
* the variable too then RMW actions may not be reliable.
* Operations with implied preemption/interrupt protection. These
* operations can be used without worrying about preemption or interrupts.
*/
#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp)
#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val)
/drivers/include/linux/personality.h
44,11 → 44,9
*/
#define personality(pers) (pers & PER_MASK)
 
 
/*
* Change personality of the currently running process.
*/
#define set_personality(pers) \
((current->personality == (pers)) ? 0 : __set_personality(pers))
#define set_personality(pers) (current->personality = (pers))
 
#endif /* _LINUX_PERSONALITY_H */
/drivers/include/linux/pm.h
0,0 → 1,788
/*
* pm.h - Power management interface
*
* Copyright (C) 2000 Andrew Henroid
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
 
#ifndef _LINUX_PM_H
#define _LINUX_PM_H
 
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
//#include <linux/timer.h>
#include <linux/completion.h>
 
/*
* Callbacks for platform drivers to implement.
*/
extern void (*pm_power_off)(void);
extern void (*pm_power_off_prepare)(void);
 
struct device; /* we have a circular dep with device.h */
#ifdef CONFIG_VT_CONSOLE_SLEEP
extern void pm_vt_switch_required(struct device *dev, bool required);
extern void pm_vt_switch_unregister(struct device *dev);
#else
static inline void pm_vt_switch_required(struct device *dev, bool required)
{
}
static inline void pm_vt_switch_unregister(struct device *dev)
{
}
#endif /* CONFIG_VT_CONSOLE_SLEEP */
 
/*
* Device power management
*/
 
struct device;
 
#ifdef CONFIG_PM
extern const char power_group_name[]; /* = "power" */
#else
#define power_group_name NULL
#endif
 
typedef struct pm_message {
int event;
} pm_message_t;
 
/**
* struct dev_pm_ops - device PM callbacks
*
* Several device power state transitions are externally visible, affecting
* the state of pending I/O queues and (for drivers that touch hardware)
* interrupts, wakeups, DMA, and other hardware state. There may also be
* internal transitions to various low-power modes which are transparent
* to the rest of the driver stack (such as a driver that's ON gating off
* clocks which are not in active use).
*
* The externally visible transitions are handled with the help of callbacks
* included in this structure in such a way that two levels of callbacks are
* involved. First, the PM core executes callbacks provided by PM domains,
* device types, classes and bus types. They are the subsystem-level callbacks
* supposed to execute callbacks provided by device drivers, although they may
* choose not to do that. If the driver callbacks are executed, they have to
* collaborate with the subsystem-level callbacks to achieve the goals
* appropriate for the given system transition, given transition phase and the
* subsystem the device belongs to.
*
* @prepare: The principal role of this callback is to prevent new children of
* the device from being registered after it has returned (the driver's
* subsystem and generally the rest of the kernel is supposed to prevent
* new calls to the probe method from being made too once @prepare() has
* succeeded). If @prepare() detects a situation it cannot handle (e.g.
* registration of a child already in progress), it may return -EAGAIN, so
* that the PM core can execute it once again (e.g. after a new child has
* been registered) to recover from the race condition.
* This method is executed for all kinds of suspend transitions and is
* followed by one of the suspend callbacks: @suspend(), @freeze(), or
* @poweroff(). If the transition is a suspend to memory or standby (that
* is, not related to hibernation), the return value of @prepare() may be
* used to indicate to the PM core to leave the device in runtime suspend
* if applicable. Namely, if @prepare() returns a positive number, the PM
* core will understand that as a declaration that the device appears to be
* runtime-suspended and it may be left in that state during the entire
* transition and during the subsequent resume if all of its descendants
* are left in runtime suspend too. If that happens, @complete() will be
* executed directly after @prepare() and it must ensure the proper
* functioning of the device after the system resume.
* The PM core executes subsystem-level @prepare() for all devices before
* starting to invoke suspend callbacks for any of them, so generally
* devices may be assumed to be functional or to respond to runtime resume
* requests while @prepare() is being executed. However, device drivers
* may NOT assume anything about the availability of user space at that
* time and it is NOT valid to request firmware from within @prepare()
* (it's too late to do that). It also is NOT valid to allocate
* substantial amounts of memory from @prepare() in the GFP_KERNEL mode.
* [To work around these limitations, drivers may register suspend and
* hibernation notifiers to be executed before the freezing of tasks.]
*
* @complete: Undo the changes made by @prepare(). This method is executed for
* all kinds of resume transitions, following one of the resume callbacks:
* @resume(), @thaw(), @restore(). Also called if the state transition
* fails before the driver's suspend callback: @suspend(), @freeze() or
* @poweroff(), can be executed (e.g. if the suspend callback fails for one
* of the other devices that the PM core has unsuccessfully attempted to
* suspend earlier).
* The PM core executes subsystem-level @complete() after it has executed
* the appropriate resume callbacks for all devices. If the corresponding
* @prepare() at the beginning of the suspend transition returned a
* positive number and the device was left in runtime suspend (without
* executing any suspend and resume callbacks for it), @complete() will be
* the only callback executed for the device during resume. In that case,
* @complete() must be prepared to do whatever is necessary to ensure the
* proper functioning of the device after the system resume. To this end,
* @complete() can check the power.direct_complete flag of the device to
* learn whether (unset) or not (set) the previous suspend and resume
* callbacks have been executed for it.
*
* @suspend: Executed before putting the system into a sleep state in which the
* contents of main memory are preserved. The exact action to perform
* depends on the device's subsystem (PM domain, device type, class or bus
* type), but generally the device must be quiescent after subsystem-level
* @suspend() has returned, so that it doesn't do any I/O or DMA.
* Subsystem-level @suspend() is executed for all devices after invoking
* subsystem-level @prepare() for all of them.
*
* @suspend_late: Continue operations started by @suspend(). For a number of
* devices @suspend_late() may point to the same callback routine as the
* runtime suspend callback.
*
* @resume: Executed after waking the system up from a sleep state in which the
* contents of main memory were preserved. The exact action to perform
* depends on the device's subsystem, but generally the driver is expected
* to start working again, responding to hardware events and software
* requests (the device itself may be left in a low-power state, waiting
* for a runtime resume to occur). The state of the device at the time its
* driver's @resume() callback is run depends on the platform and subsystem
* the device belongs to. On most platforms, there are no restrictions on
* availability of resources like clocks during @resume().
* Subsystem-level @resume() is executed for all devices after invoking
* subsystem-level @resume_noirq() for all of them.
*
* @resume_early: Prepare to execute @resume(). For a number of devices
* @resume_early() may point to the same callback routine as the runtime
* resume callback.
*
* @freeze: Hibernation-specific, executed before creating a hibernation image.
* Analogous to @suspend(), but it should not enable the device to signal
* wakeup events or change its power state. The majority of subsystems
* (with the notable exception of the PCI bus type) expect the driver-level
* @freeze() to save the device settings in memory to be used by @restore()
* during the subsequent resume from hibernation.
* Subsystem-level @freeze() is executed for all devices after invoking
* subsystem-level @prepare() for all of them.
*
* @freeze_late: Continue operations started by @freeze(). Analogous to
* @suspend_late(), but it should not enable the device to signal wakeup
* events or change its power state.
*
* @thaw: Hibernation-specific, executed after creating a hibernation image OR
* if the creation of an image has failed. Also executed after a failing
* attempt to restore the contents of main memory from such an image.
* Undo the changes made by the preceding @freeze(), so the device can be
* operated in the same way as immediately before the call to @freeze().
* Subsystem-level @thaw() is executed for all devices after invoking
* subsystem-level @thaw_noirq() for all of them. It also may be executed
* directly after @freeze() in case of a transition error.
*
* @thaw_early: Prepare to execute @thaw(). Undo the changes made by the
* preceding @freeze_late().
*
* @poweroff: Hibernation-specific, executed after saving a hibernation image.
* Analogous to @suspend(), but it need not save the device's settings in
* memory.
* Subsystem-level @poweroff() is executed for all devices after invoking
* subsystem-level @prepare() for all of them.
*
* @poweroff_late: Continue operations started by @poweroff(). Analogous to
* @suspend_late(), but it need not save the device's settings in memory.
*
* @restore: Hibernation-specific, executed after restoring the contents of main
* memory from a hibernation image, analogous to @resume().
*
* @restore_early: Prepare to execute @restore(), analogous to @resume_early().
*
* @suspend_noirq: Complete the actions started by @suspend(). Carry out any
* additional operations required for suspending the device that might be
* racing with its driver's interrupt handler, which is guaranteed not to
* run while @suspend_noirq() is being executed.
* It generally is expected that the device will be in a low-power state
* (appropriate for the target system sleep state) after subsystem-level
* @suspend_noirq() has returned successfully. If the device can generate
* system wakeup signals and is enabled to wake up the system, it should be
* configured to do so at that time. However, depending on the platform
* and device's subsystem, @suspend() or @suspend_late() may be allowed to
* put the device into the low-power state and configure it to generate
* wakeup signals, in which case it generally is not necessary to define
* @suspend_noirq().
*
* @resume_noirq: Prepare for the execution of @resume() by carrying out any
* operations required for resuming the device that might be racing with
* its driver's interrupt handler, which is guaranteed not to run while
* @resume_noirq() is being executed.
*
* @freeze_noirq: Complete the actions started by @freeze(). Carry out any
* additional operations required for freezing the device that might be
* racing with its driver's interrupt handler, which is guaranteed not to
* run while @freeze_noirq() is being executed.
* The power state of the device should not be changed by either @freeze(),
* or @freeze_late(), or @freeze_noirq() and it should not be configured to
* signal system wakeup by any of these callbacks.
*
* @thaw_noirq: Prepare for the execution of @thaw() by carrying out any
* operations required for thawing the device that might be racing with its
* driver's interrupt handler, which is guaranteed not to run while
* @thaw_noirq() is being executed.
*
* @poweroff_noirq: Complete the actions started by @poweroff(). Analogous to
* @suspend_noirq(), but it need not save the device's settings in memory.
*
* @restore_noirq: Prepare for the execution of @restore() by carrying out any
* operations required for thawing the device that might be racing with its
* driver's interrupt handler, which is guaranteed not to run while
* @restore_noirq() is being executed. Analogous to @resume_noirq().
*
* All of the above callbacks, except for @complete(), return error codes.
* However, the error codes returned by the resume operations, @resume(),
* @thaw(), @restore(), @resume_noirq(), @thaw_noirq(), and @restore_noirq(), do
* not cause the PM core to abort the resume transition during which they are
* returned. The error codes returned in those cases are only printed by the PM
* core to the system logs for debugging purposes. Still, it is recommended
* that drivers only return error codes from their resume methods in case of an
* unrecoverable failure (i.e. when the device being handled refuses to resume
* and becomes unusable) to allow us to modify the PM core in the future, so
* that it can avoid attempting to handle devices that failed to resume and
* their children.
*
* It is allowed to unregister devices while the above callbacks are being
* executed. However, a callback routine must NOT try to unregister the device
* it was called for, although it may unregister children of that device (for
* example, if it detects that a child was unplugged while the system was
* asleep).
*
* Refer to Documentation/power/devices.txt for more information about the role
* of the above callbacks in the system suspend process.
*
* There also are callbacks related to runtime power management of devices.
* Again, these callbacks are executed by the PM core only for subsystems
* (PM domains, device types, classes and bus types) and the subsystem-level
* callbacks are supposed to invoke the driver callbacks. Moreover, the exact
* actions to be performed by a device driver's callbacks generally depend on
* the platform and subsystem the device belongs to.
*
* @runtime_suspend: Prepare the device for a condition in which it won't be
* able to communicate with the CPU(s) and RAM due to power management.
* This need not mean that the device should be put into a low-power state.
* For example, if the device is behind a link which is about to be turned
* off, the device may remain at full power. If the device does go to low
* power and is capable of generating runtime wakeup events, remote wakeup
* (i.e., a hardware mechanism allowing the device to request a change of
* its power state via an interrupt) should be enabled for it.
*
* @runtime_resume: Put the device into the fully active state in response to a
* wakeup event generated by hardware or at the request of software. If
* necessary, put the device into the full-power state and restore its
* registers, so that it is fully operational.
*
* @runtime_idle: Device appears to be inactive and it might be put into a
* low-power state if all of the necessary conditions are satisfied.
* Check these conditions, and return 0 if it's appropriate to let the PM
* core queue a suspend request for the device.
*
* Refer to Documentation/power/runtime_pm.txt for more information about the
* role of the above callbacks in device runtime power management.
*
*/
 
struct dev_pm_ops {
int (*prepare)(struct device *dev);
void (*complete)(struct device *dev);
int (*suspend)(struct device *dev);
int (*resume)(struct device *dev);
int (*freeze)(struct device *dev);
int (*thaw)(struct device *dev);
int (*poweroff)(struct device *dev);
int (*restore)(struct device *dev);
int (*suspend_late)(struct device *dev);
int (*resume_early)(struct device *dev);
int (*freeze_late)(struct device *dev);
int (*thaw_early)(struct device *dev);
int (*poweroff_late)(struct device *dev);
int (*restore_early)(struct device *dev);
int (*suspend_noirq)(struct device *dev);
int (*resume_noirq)(struct device *dev);
int (*freeze_noirq)(struct device *dev);
int (*thaw_noirq)(struct device *dev);
int (*poweroff_noirq)(struct device *dev);
int (*restore_noirq)(struct device *dev);
int (*runtime_suspend)(struct device *dev);
int (*runtime_resume)(struct device *dev);
int (*runtime_idle)(struct device *dev);
};
 
#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
.suspend = suspend_fn, \
.resume = resume_fn, \
.freeze = suspend_fn, \
.thaw = resume_fn, \
.poweroff = suspend_fn, \
.restore = resume_fn,
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
 
#ifdef CONFIG_PM_SLEEP
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
.suspend_late = suspend_fn, \
.resume_early = resume_fn, \
.freeze_late = suspend_fn, \
.thaw_early = resume_fn, \
.poweroff_late = suspend_fn, \
.restore_early = resume_fn,
#else
#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
 
#ifdef CONFIG_PM_SLEEP
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
.suspend_noirq = suspend_fn, \
.resume_noirq = resume_fn, \
.freeze_noirq = suspend_fn, \
.thaw_noirq = resume_fn, \
.poweroff_noirq = suspend_fn, \
.restore_noirq = resume_fn,
#else
#define SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
 
#ifdef CONFIG_PM
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
.runtime_suspend = suspend_fn, \
.runtime_resume = resume_fn, \
.runtime_idle = idle_fn,
#else
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif
 
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
* to RAM and hibernation.
*/
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
const struct dev_pm_ops name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
 
/*
* Use this for defining a set of PM operations to be used in all situations
* (system suspend, hibernation or runtime PM).
* NOTE: In general, system suspend callbacks, .suspend() and .resume(), should
* be different from the corresponding runtime PM callbacks, .runtime_suspend(),
* and .runtime_resume(), because .runtime_suspend() always works on an already
* quiescent device, while .suspend() should assume that the device may be doing
* something when it is called (it should ensure that the device will be
* quiescent after it has returned). Therefore it's better to point the "late"
* suspend and "early" resume callback pointers, .suspend_late() and
* .resume_early(), to the same routines as .runtime_suspend() and
* .runtime_resume(), respectively (and analogously for hibernation).
*/
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
const struct dev_pm_ops name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
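/*
 * Example (sketch): a driver sharing one pair of callbacks between
 * suspend-to-RAM and hibernation; foo_suspend()/foo_resume() are
 * hypothetical driver functions:
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 *	// and in the driver definition:  .driver.pm = &foo_pm_ops,
 */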
 
/**
* PM_EVENT_ messages
*
* The following PM_EVENT_ messages are defined for the internal use of the PM
* core, in order to provide a mechanism allowing the high level suspend and
* hibernation code to convey the necessary information to the device PM core
* code:
*
* ON No transition.
*
* FREEZE System is going to hibernate, call ->prepare() and ->freeze()
* for all devices.
*
* SUSPEND System is going to suspend, call ->prepare() and ->suspend()
* for all devices.
*
* HIBERNATE Hibernation image has been saved, call ->prepare() and
* ->poweroff() for all devices.
*
* QUIESCE Contents of main memory are going to be restored from a (loaded)
* hibernation image, call ->prepare() and ->freeze() for all
* devices.
*
* RESUME System is resuming, call ->resume() and ->complete() for all
* devices.
*
* THAW Hibernation image has been created, call ->thaw() and
* ->complete() for all devices.
*
* RESTORE Contents of main memory have been restored from a hibernation
* image, call ->restore() and ->complete() for all devices.
*
* RECOVER Creation of a hibernation image or restoration of the main
* memory contents from a hibernation image has failed, call
* ->thaw() and ->complete() for all devices.
*
* The following PM_EVENT_ messages are defined for internal use by
* kernel subsystems. They are never issued by the PM core.
*
* USER_SUSPEND Manual selective suspend was issued by userspace.
*
* USER_RESUME Manual selective resume was issued by userspace.
*
* REMOTE_WAKEUP Remote-wakeup request was received from the device.
*
* AUTO_SUSPEND Automatic (device idle) runtime suspend was
* initiated by the subsystem.
*
* AUTO_RESUME Automatic (device needed) runtime resume was
* requested by a driver.
*/
 
#define PM_EVENT_INVALID (-1)
#define PM_EVENT_ON 0x0000
#define PM_EVENT_FREEZE 0x0001
#define PM_EVENT_SUSPEND 0x0002
#define PM_EVENT_HIBERNATE 0x0004
#define PM_EVENT_QUIESCE 0x0008
#define PM_EVENT_RESUME 0x0010
#define PM_EVENT_THAW 0x0020
#define PM_EVENT_RESTORE 0x0040
#define PM_EVENT_RECOVER 0x0080
#define PM_EVENT_USER 0x0100
#define PM_EVENT_REMOTE 0x0200
#define PM_EVENT_AUTO 0x0400
 
#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PM_EVENT_USER_SUSPEND (PM_EVENT_USER | PM_EVENT_SUSPEND)
#define PM_EVENT_USER_RESUME (PM_EVENT_USER | PM_EVENT_RESUME)
#define PM_EVENT_REMOTE_RESUME (PM_EVENT_REMOTE | PM_EVENT_RESUME)
#define PM_EVENT_AUTO_SUSPEND (PM_EVENT_AUTO | PM_EVENT_SUSPEND)
#define PM_EVENT_AUTO_RESUME (PM_EVENT_AUTO | PM_EVENT_RESUME)
 
#define PMSG_INVALID ((struct pm_message){ .event = PM_EVENT_INVALID, })
#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, })
#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, })
#define PMSG_USER_SUSPEND ((struct pm_message) \
{ .event = PM_EVENT_USER_SUSPEND, })
#define PMSG_USER_RESUME ((struct pm_message) \
{ .event = PM_EVENT_USER_RESUME, })
#define PMSG_REMOTE_RESUME ((struct pm_message) \
{ .event = PM_EVENT_REMOTE_RESUME, })
#define PMSG_AUTO_SUSPEND ((struct pm_message) \
{ .event = PM_EVENT_AUTO_SUSPEND, })
#define PMSG_AUTO_RESUME ((struct pm_message) \
{ .event = PM_EVENT_AUTO_RESUME, })
 
#define PMSG_IS_AUTO(msg) (((msg).event & PM_EVENT_AUTO) != 0)
 
/**
* Device run-time power management status.
*
* These status labels are used internally by the PM core to indicate the
* current status of a device with respect to the PM core operations. They do
* not reflect the actual power state of the device or its status as seen by the
* driver.
*
* RPM_ACTIVE Device is fully operational. Indicates that the device
* bus type's ->runtime_resume() callback has completed
* successfully.
*
* RPM_SUSPENDED Device bus type's ->runtime_suspend() callback has
* completed successfully. The device is regarded as
* suspended.
*
* RPM_RESUMING Device bus type's ->runtime_resume() callback is being
* executed.
*
* RPM_SUSPENDING Device bus type's ->runtime_suspend() callback is being
* executed.
*/
 
enum rpm_status {
RPM_ACTIVE = 0,
RPM_RESUMING,
RPM_SUSPENDED,
RPM_SUSPENDING,
};
 
/**
* Device run-time power management request types.
*
* RPM_REQ_NONE Do nothing.
*
* RPM_REQ_IDLE Run the device bus type's ->runtime_idle() callback
*
* RPM_REQ_SUSPEND Run the device bus type's ->runtime_suspend() callback
*
* RPM_REQ_AUTOSUSPEND Same as RPM_REQ_SUSPEND, but not until the device has
* been inactive for as long as power.autosuspend_delay
*
* RPM_REQ_RESUME Run the device bus type's ->runtime_resume() callback
*/
 
enum rpm_request {
RPM_REQ_NONE = 0,
RPM_REQ_IDLE,
RPM_REQ_SUSPEND,
RPM_REQ_AUTOSUSPEND,
RPM_REQ_RESUME,
};
 
struct wakeup_source;
struct wake_irq;
struct pm_domain_data;
 
struct pm_subsys_data {
spinlock_t lock;
unsigned int refcount;
#ifdef CONFIG_PM_CLK
struct list_head clock_list;
#endif
#ifdef CONFIG_PM_GENERIC_DOMAINS
struct pm_domain_data *domain_data;
#endif
};
 
struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
unsigned int async_suspend:1;
bool is_prepared:1; /* Owned by the PM core */
bool is_suspended:1; /* Ditto */
bool is_noirq_suspended:1;
bool is_late_suspended:1;
bool ignore_children:1;
bool early_init:1; /* Owned by the PM core */
bool direct_complete:1; /* Owned by the PM core */
spinlock_t lock;
#ifdef CONFIG_PM_SLEEP
struct list_head entry;
struct completion completion;
struct wakeup_source *wakeup;
bool wakeup_path:1;
bool syscore:1;
#else
unsigned int should_wakeup:1;
#endif
#ifdef CONFIG_PM
struct timer_list suspend_timer;
unsigned long timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
struct wake_irq *wakeirq;
atomic_t usage_count;
atomic_t child_count;
unsigned int disable_depth:3;
unsigned int idle_notification:1;
unsigned int request_pending:1;
unsigned int deferred_resume:1;
unsigned int run_wake:1;
unsigned int runtime_auto:1;
unsigned int no_callbacks:1;
unsigned int irq_safe:1;
unsigned int use_autosuspend:1;
unsigned int timer_autosuspends:1;
unsigned int memalloc_noio:1;
enum rpm_request request;
enum rpm_status runtime_status;
int runtime_error;
int autosuspend_delay;
unsigned long last_busy;
unsigned long active_jiffies;
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
#endif
struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
void (*set_latency_tolerance)(struct device *, s32);
struct dev_pm_qos *qos;
};
 
extern void update_pm_runtime_accounting(struct device *dev);
extern int dev_pm_get_subsys_data(struct device *dev);
extern void dev_pm_put_subsys_data(struct device *dev);
 
/*
* Power domains provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions along with
* subsystem-level and driver-level callbacks.
*
* @detach: Called when removing a device from the domain.
* @activate: Called before executing probe routines for bus types and drivers.
* @sync: Called after successful driver probe.
* @dismiss: Called after unsuccessful driver probe and after driver removal.
*/
struct dev_pm_domain {
struct dev_pm_ops ops;
void (*detach)(struct device *dev, bool power_off);
int (*activate)(struct device *dev);
void (*sync)(struct device *dev);
void (*dismiss)(struct device *dev);
};
 
/*
* The PM_EVENT_ messages are also used by drivers implementing the legacy
* suspend framework, based on the ->suspend() and ->resume() callbacks common
* for suspend and hibernation transitions, according to the rules below.
*/
 
/* Necessary, because several drivers use PM_EVENT_PRETHAW */
#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE
 
/*
* One transition is triggered by resume(), after a suspend() call; the
* message is implicit:
*
* ON Driver starts working again, responding to hardware events
* and software requests. The hardware may have gone through
* a power-off reset, or it may have maintained state from the
* previous suspend() which the driver will rely on while
* resuming. On most platforms, there are no restrictions on
* availability of resources like clocks during resume().
*
* Other transitions are triggered by messages sent using suspend(). All
* these transitions quiesce the driver, so that I/O queues are inactive.
* That commonly entails turning off IRQs and DMA; there may be rules
* about how to quiesce that are specific to the bus or the device's type.
* (For example, network drivers mark the link state.) Other details may
* differ according to the message:
*
* SUSPEND Quiesce, enter a low power device state appropriate for
* the upcoming system state (such as PCI_D3hot), and enable
* wakeup events as appropriate.
*
* HIBERNATE Enter a low power device state appropriate for the hibernation
* state (eg. ACPI S4) and enable wakeup events as appropriate.
*
* FREEZE Quiesce operations so that a consistent image can be saved;
* but do NOT otherwise enter a low power device state, and do
* NOT emit system wakeup events.
*
* PRETHAW Quiesce as if for FREEZE; additionally, prepare for restoring
* the system from a snapshot taken after an earlier FREEZE.
* Some drivers will need to reset their hardware state instead
* of preserving it, to ensure that it's never mistaken for the
* state which that earlier snapshot had set up.
*
* A minimally power-aware driver treats all messages as SUSPEND, fully
* reinitializes its device during resume() -- whether or not it was reset
* during the suspend/resume cycle -- and can't issue wakeup events.
*
* More power-aware drivers may also use low power states at runtime as
* well as during system sleep states like PM_SUSPEND_STANDBY. They may
* be able to use wakeup events to exit from runtime low-power states,
* or from system low-power states such as standby or suspend-to-RAM.
*/
 
#ifdef CONFIG_PM_SLEEP
extern void device_pm_lock(void);
extern void dpm_resume_start(pm_message_t state);
extern void dpm_resume_end(pm_message_t state);
extern void dpm_resume_noirq(pm_message_t state);
extern void dpm_resume_early(pm_message_t state);
extern void dpm_resume(pm_message_t state);
extern void dpm_complete(pm_message_t state);
 
extern void device_pm_unlock(void);
extern int dpm_suspend_end(pm_message_t state);
extern int dpm_suspend_start(pm_message_t state);
extern int dpm_suspend_noirq(pm_message_t state);
extern int dpm_suspend_late(pm_message_t state);
extern int dpm_suspend(pm_message_t state);
extern int dpm_prepare(pm_message_t state);
 
extern void __suspend_report_result(const char *function, void *fn, int ret);
 
#define suspend_report_result(fn, ret) \
do { \
__suspend_report_result(__func__, fn, ret); \
} while (0)
 
extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
extern void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *));
 
extern int pm_generic_prepare(struct device *dev);
extern int pm_generic_suspend_late(struct device *dev);
extern int pm_generic_suspend_noirq(struct device *dev);
extern int pm_generic_suspend(struct device *dev);
extern int pm_generic_resume_early(struct device *dev);
extern int pm_generic_resume_noirq(struct device *dev);
extern int pm_generic_resume(struct device *dev);
extern int pm_generic_freeze_noirq(struct device *dev);
extern int pm_generic_freeze_late(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
extern int pm_generic_thaw_noirq(struct device *dev);
extern int pm_generic_thaw_early(struct device *dev);
extern int pm_generic_thaw(struct device *dev);
extern int pm_generic_restore_noirq(struct device *dev);
extern int pm_generic_restore_early(struct device *dev);
extern int pm_generic_restore(struct device *dev);
extern int pm_generic_poweroff_noirq(struct device *dev);
extern int pm_generic_poweroff_late(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);
extern void pm_complete_with_resume_check(struct device *dev);
 
#else /* !CONFIG_PM_SLEEP */
 
#define device_pm_lock() do {} while (0)
#define device_pm_unlock() do {} while (0)
 
static inline int dpm_suspend_start(pm_message_t state)
{
return 0;
}
 
#define suspend_report_result(fn, ret) do {} while (0)
 
static inline int device_pm_wait_for_dev(struct device *a, struct device *b)
{
return 0;
}
 
static inline void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
}
 
#define pm_generic_prepare NULL
#define pm_generic_suspend_late NULL
#define pm_generic_suspend_noirq NULL
#define pm_generic_suspend NULL
#define pm_generic_resume_early NULL
#define pm_generic_resume_noirq NULL
#define pm_generic_resume NULL
#define pm_generic_freeze_noirq NULL
#define pm_generic_freeze_late NULL
#define pm_generic_freeze NULL
#define pm_generic_thaw_noirq NULL
#define pm_generic_thaw_early NULL
#define pm_generic_thaw NULL
#define pm_generic_restore_noirq NULL
#define pm_generic_restore_early NULL
#define pm_generic_restore NULL
#define pm_generic_poweroff_noirq NULL
#define pm_generic_poweroff_late NULL
#define pm_generic_poweroff NULL
#define pm_generic_complete NULL
#endif /* !CONFIG_PM_SLEEP */
 
/* How to reorder dpm_list after device_move() */
enum dpm_order {
DPM_ORDER_NONE,
DPM_ORDER_DEV_AFTER_PARENT,
DPM_ORDER_PARENT_BEFORE_DEV,
DPM_ORDER_DEV_LAST,
};
 
#endif /* _LINUX_PM_H */
/drivers/include/linux/pm_runtime.h
0,0 → 1,276
/*
* pm_runtime.h - Device run-time power management helper functions.
*
* Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
*
* This file is released under the GPLv2.
*/
 
#ifndef _LINUX_PM_RUNTIME_H
#define _LINUX_PM_RUNTIME_H
 
#include <linux/device.h>
#include <linux/pm.h>
 
#include <linux/jiffies.h>
 
/* Runtime PM flag argument bits */
#define RPM_ASYNC 0x01 /* Request is asynchronous */
#define RPM_NOWAIT 0x02 /* Don't wait for concurrent
state change */
#define RPM_GET_PUT 0x04 /* Increment/decrement the
usage_count */
#define RPM_AUTO 0x08 /* Use autosuspend_delay */
 
#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;
 
static inline bool queue_pm_work(struct work_struct *work)
{
return queue_work(pm_wq, work);
}
 
extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
extern int pm_runtime_force_resume(struct device *dev);
 
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
extern void pm_runtime_forbid(struct device *dev);
extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
 
static inline bool pm_children_suspended(struct device *dev)
{
return dev->power.ignore_children
|| !atomic_read(&dev->power.child_count);
}
 
static inline void pm_runtime_get_noresume(struct device *dev)
{
atomic_inc(&dev->power.usage_count);
}
 
static inline void pm_runtime_put_noidle(struct device *dev)
{
atomic_add_unless(&dev->power.usage_count, -1, 0);
}
 
static inline bool device_run_wake(struct device *dev)
{
return dev->power.run_wake;
}
 
static inline void device_set_run_wake(struct device *dev, bool enable)
{
dev->power.run_wake = enable;
}
 
static inline bool pm_runtime_suspended(struct device *dev)
{
return dev->power.runtime_status == RPM_SUSPENDED
&& !dev->power.disable_depth;
}
 
static inline bool pm_runtime_active(struct device *dev)
{
return dev->power.runtime_status == RPM_ACTIVE
|| dev->power.disable_depth;
}
 
static inline bool pm_runtime_status_suspended(struct device *dev)
{
return dev->power.runtime_status == RPM_SUSPENDED;
}
 
static inline bool pm_runtime_enabled(struct device *dev)
{
return !dev->power.disable_depth;
}
 
static inline bool pm_runtime_callbacks_present(struct device *dev)
{
return !dev->power.no_callbacks;
}
 
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
ACCESS_ONCE(dev->power.last_busy) = jiffies;
}
 
static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
return dev->power.irq_safe;
}
 
#else /* !CONFIG_PM */
 
static inline bool queue_pm_work(struct work_struct *work) { return false; }
 
static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
 
static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
return -ENOSYS;
}
static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
return -ENOSYS;
}
static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
{
return 1;
}
static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
return -ENOSYS;
}
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}
 
static inline bool pm_children_suspended(struct device *dev) { return false; }
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool device_run_wake(struct device *dev) { return false; }
static inline void device_set_run_wake(struct device *dev, bool enable) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }
 
static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
 
static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
int delay) {}
static inline unsigned long pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
 
#endif /* !CONFIG_PM */
 
static inline int pm_runtime_idle(struct device *dev)
{
return __pm_runtime_idle(dev, 0);
}
 
static inline int pm_runtime_suspend(struct device *dev)
{
return __pm_runtime_suspend(dev, 0);
}
 
static inline int pm_runtime_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_AUTO);
}
 
static inline int pm_runtime_resume(struct device *dev)
{
return __pm_runtime_resume(dev, 0);
}
 
static inline int pm_request_idle(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_ASYNC);
}
 
static inline int pm_request_resume(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_ASYNC);
}
 
static inline int pm_request_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}
 
static inline int pm_runtime_get(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}
 
static inline int pm_runtime_get_sync(struct device *dev)
{
return __pm_runtime_resume(dev, RPM_GET_PUT);
}
 
static inline int pm_runtime_put(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}
 
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev,
RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}
 
static inline int pm_runtime_put_sync(struct device *dev)
{
return __pm_runtime_idle(dev, RPM_GET_PUT);
}
 
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT);
}
 
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}
 
static inline int pm_runtime_set_active(struct device *dev)
{
return __pm_runtime_set_status(dev, RPM_ACTIVE);
}
 
static inline void pm_runtime_set_suspended(struct device *dev)
{
__pm_runtime_set_status(dev, RPM_SUSPENDED);
}
 
static inline void pm_runtime_disable(struct device *dev)
{
__pm_runtime_disable(dev, true);
}
 
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
__pm_runtime_use_autosuspend(dev, true);
}
 
static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
__pm_runtime_use_autosuspend(dev, false);
}
 
#endif
/drivers/include/linux/poison.h
19,8 → 19,8
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
 
/********** include/linux/timer.h **********/
/*
69,10 → 69,6
#define ATM_POISON_FREE 0x12
#define ATM_POISON 0xdeadbeef
 
/********** net/ **********/
#define NEIGHBOR_DEAD 0xdeadbeef
#define NETFILTER_LINK_POISON 0xdead57ac
 
/********** kernel/mutexes **********/
#define MUTEX_DEBUG_INIT 0x11
#define MUTEX_DEBUG_FREE 0x22
83,7 → 79,4
/********** security/ **********/
#define KEY_DESTROY 0xbd
 
/********** sound/oss/ **********/
#define OSS_POISON_FREE 0xAB
 
#endif
/drivers/include/linux/preempt.h
10,17 → 10,124
#include <linux/list.h>
 
/*
* We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
* the other bits -- can't include that header due to inclusion hell.
* We put the hardirq and softirq counter into the preemption
* counter. The bitmask has the following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
*
* The hardirq count could in theory be the same as the number of
* interrupts in the system, but we run all interrupt handlers with
* interrupts disabled, so we cannot have nesting interrupts. Though
* there are a few palaeontologic drivers which reenable interrupts in
* the handler, so we need more than one bit here.
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x000f0000
* NMI_MASK: 0x00100000
* PREEMPT_NEED_RESCHED: 0x80000000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 4
#define NMI_BITS 1
 
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
 
#define __IRQ_MASK(x) ((1UL << (x))-1)
 
#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
 
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
 
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
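 
/*
 * Illustrative sketch (not part of the original header): decomposing a
 * hypothetical preempt_count() value of 0x00010203 with the masks above:
 *
 *	(0x00010203 & PREEMPT_MASK) >> PREEMPT_SHIFT == 0x03  three preempt_disable()s
 *	(0x00010203 & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT == 0x02  softirq/bh-disable depth of two
 *	(0x00010203 & HARDIRQ_MASK) >> HARDIRQ_SHIFT == 0x01  one hardirq in progress
 */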
 
/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED 0x80000000
 
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
 
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| NMI_MASK))
 
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
* in_softirq - Are we currently processing softirq or have bh disabled?
* in_serving_softirq - Are we currently processing softirq?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
 
/*
* Are we in NMI context?
*/
#define in_nmi() (preempt_count() & NMI_MASK)
 
/*
* The preempt_count offset after preempt_disable();
*/
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET 0
#endif
 
/*
* The preempt_count offset after spin_lock()
*/
#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
 
/*
* The preempt_count offset needed for things like:
*
* spin_lock_bh()
*
* Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
* softirqs, such that unlock sequences of:
*
* spin_unlock();
* local_bh_enable();
*
* Work as expected.
*/
#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
 
/*
* Are we running in atomic context? WARNING: this macro cannot
* always detect atomic context; in particular, it cannot know about
* held spinlocks in non-preemptible kernels. Thus it should not be
* used in the general case to determine whether sleeping is possible.
* Do not use in_atomic() in driver code.
*/
#define in_atomic() (preempt_count() != 0)
 
/*
* Check whether we were atomic before we did preempt_disable():
* (used by the scheduler)
*/
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
 
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#define preempt_count_dec_and_test() \
({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val) __preempt_count_add(val)
#define preempt_count_sub(val) __preempt_count_sub(val)
49,6 → 156,8
 
#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
 
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
57,52 → 166,46
__preempt_schedule(); \
} while (0)
 
#define preempt_enable_notrace() \
do { \
barrier(); \
if (unlikely(__preempt_count_dec_and_test())) \
__preempt_schedule_notrace(); \
} while (0)
 
#define preempt_check_resched() \
do { \
if (should_resched()) \
if (should_resched(0)) \
__preempt_schedule(); \
} while (0)
 
#else
#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
barrier(); \
preempt_count_dec(); \
} while (0)
#define preempt_check_resched() do { } while (0)
#endif
 
#define preempt_disable_notrace() \
#define preempt_enable_notrace() \
do { \
__preempt_count_inc(); \
barrier(); \
} while (0)
 
#define preempt_enable_no_resched_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
 
#ifdef CONFIG_PREEMPT
#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPT */
 
#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif
 
#define preempt_enable_notrace() \
#define preempt_disable_notrace() \
do { \
__preempt_count_inc(); \
barrier(); \
if (unlikely(__preempt_count_dec_and_test())) \
__preempt_schedule_context(); \
} while (0)
#else
#define preempt_enable_notrace() \
 
#define preempt_enable_no_resched_notrace() \
do { \
barrier(); \
__preempt_count_dec(); \
} while (0)
#endif
 
#else /* !CONFIG_PREEMPT_COUNT */
 
121,6 → 224,7
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
#define preemptible() 0
 
#endif /* CONFIG_PREEMPT_COUNT */
 
180,6 → 284,8
struct preempt_ops *ops;
};
 
void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);
 
/drivers/include/linux/printk.h
65,6 → 65,10
*/
#define DEPRECATED "[Deprecated]: "
 
/*
* Dummy printk for disabled debugging statements to use whilst maintaining
* gcc's format and side-effect checking.
*/
static inline __printf(1, 2)
int no_printk(const char *fmt, ...)
{
103,6 → 107,11
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
/*
* Like KERN_CONT, pr_cont() should only be used when continuing
* a line with no newline ('\n') enclosed. Otherwise it defaults
* back to KERN_DEFAULT.
*/
#define pr_cont(fmt, ...) \
printk(KERN_CONT fmt, ##__VA_ARGS__)
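 
/*
 * Illustrative usage sketch (not part of the original header): pr_info()
 * deliberately omits the trailing newline so that pr_cont() can finish the
 * same line:
 *
 *	pr_info("probing device");
 *	pr_cont(" ... done\n");
 */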
 
250,9 → 259,9
DUMP_PREFIX_ADDRESS,
DUMP_PREFIX_OFFSET
};
extern void hex_dump_to_buffer(const void *buf, size_t len,
int rowsize, int groupsize,
char *linebuf, size_t linebuflen, bool ascii);
extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
int groupsize, char *linebuf, size_t linebuflen,
bool ascii);
 
extern void print_hex_dump(const char *level, const char *prefix_str,
int prefix_type, int rowsize, int groupsize,
/drivers/include/linux/pwm.h
0,0 → 1,219
#ifndef __LINUX_PWM_H
#define __LINUX_PWM_H
 
#include <linux/err.h>
#include <linux/mutex.h>
//#include <linux/of.h>
 
struct device;
struct pwm_device;
struct seq_file;
 
#if IS_ENABLED(CONFIG_PWM)
/*
* pwm_request - request a PWM device
*/
struct pwm_device *pwm_request(int pwm_id, const char *label);
 
/*
* pwm_free - free a PWM device
*/
void pwm_free(struct pwm_device *pwm);
 
/*
* pwm_config - change a PWM device configuration
*/
int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns);
 
/*
* pwm_enable - start a PWM output toggling
*/
int pwm_enable(struct pwm_device *pwm);
 
/*
* pwm_disable - stop a PWM output toggling
*/
void pwm_disable(struct pwm_device *pwm);
#else
static inline struct pwm_device *pwm_request(int pwm_id, const char *label)
{
return ERR_PTR(-ENODEV);
}
 
static inline void pwm_free(struct pwm_device *pwm)
{
}
 
static inline int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
{
return -EINVAL;
}
 
static inline int pwm_enable(struct pwm_device *pwm)
{
return -EINVAL;
}
 
static inline void pwm_disable(struct pwm_device *pwm)
{
}
#endif
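 
/*
 * Illustrative usage sketch of the legacy PWM API above (hypothetical pwm_id 0
 * and a 1 kHz, 50% duty-cycle signal); not part of the original header:
 *
 *	struct pwm_device *pwm = pwm_request(0, "backlight");
 *
 *	if (!IS_ERR(pwm)) {
 *		pwm_config(pwm, 500000, 1000000);	// duty_ns, period_ns
 *		pwm_enable(pwm);
 *		...
 *		pwm_disable(pwm);
 *		pwm_free(pwm);
 *	}
 */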
 
struct pwm_chip;
 
/**
* enum pwm_polarity - polarity of a PWM signal
* @PWM_POLARITY_NORMAL: a high signal for the duration of the duty-
* cycle, followed by a low signal for the remainder of the pulse
* period
* @PWM_POLARITY_INVERSED: a low signal for the duration of the duty-
* cycle, followed by a high signal for the remainder of the pulse
* period
*/
enum pwm_polarity {
PWM_POLARITY_NORMAL,
PWM_POLARITY_INVERSED,
};
 
enum {
PWMF_REQUESTED = 1 << 0,
PWMF_ENABLED = 1 << 1,
PWMF_EXPORTED = 1 << 2,
};
 
/**
* struct pwm_device - PWM channel object
* @label: name of the PWM device
* @flags: flags associated with the PWM device
* @hwpwm: per-chip relative index of the PWM device
* @pwm: global index of the PWM device
* @chip: PWM chip providing this PWM device
* @chip_data: chip-private data associated with the PWM device
* @lock: used to serialize accesses to the PWM device where necessary
* @period: period of the PWM signal (in nanoseconds)
* @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
* @polarity: polarity of the PWM signal
*/
struct pwm_device {
const char *label;
unsigned long flags;
unsigned int hwpwm;
unsigned int pwm;
struct pwm_chip *chip;
void *chip_data;
struct mutex lock;
 
unsigned int period;
unsigned int duty_cycle;
enum pwm_polarity polarity;
};
 
static inline bool pwm_is_enabled(const struct pwm_device *pwm)
{
return test_bit(PWMF_ENABLED, &pwm->flags);
}
 
static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
{
if (pwm)
pwm->period = period;
}
 
static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
{
return pwm ? pwm->period : 0;
}
 
static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
{
if (pwm)
pwm->duty_cycle = duty;
}
 
static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
{
return pwm ? pwm->duty_cycle : 0;
}
 
/*
* pwm_set_polarity - configure the polarity of a PWM signal
*/
int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
 
static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
{
return pwm ? pwm->polarity : PWM_POLARITY_NORMAL;
}
 
/**
* struct pwm_ops - PWM controller operations
* @request: optional hook for requesting a PWM
* @free: optional hook for freeing a PWM
* @config: configure duty cycles and period length for this PWM
* @set_polarity: configure the polarity of this PWM
* @enable: enable PWM output toggling
* @disable: disable PWM output toggling
* @dbg_show: optional routine to show contents in debugfs
* @owner: helps prevent removal of modules exporting active PWMs
*/
struct pwm_ops {
int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns);
int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
enum pwm_polarity polarity);
int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
#ifdef CONFIG_DEBUG_FS
void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s);
#endif
struct module *owner;
};
 
#if IS_ENABLED(CONFIG_PWM)
int pwm_set_chip_data(struct pwm_device *pwm, void *data);
void *pwm_get_chip_data(struct pwm_device *pwm);
struct pwm_device *pwm_get(struct device *dev, const char *con_id);
struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id);
void pwm_put(struct pwm_device *pwm);
 
struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id);
void devm_pwm_put(struct device *dev, struct pwm_device *pwm);
 
bool pwm_can_sleep(struct pwm_device *pwm);
#else
static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
{
return -EINVAL;
}
 
static inline void *pwm_get_chip_data(struct pwm_device *pwm)
{
return NULL;
}
static inline struct pwm_device *pwm_get(struct device *dev,
const char *consumer)
{
return ERR_PTR(-ENODEV);
}
static inline void pwm_put(struct pwm_device *pwm)
{
}
 
static inline struct pwm_device *devm_pwm_get(struct device *dev,
const char *consumer)
{
return ERR_PTR(-ENODEV);
}
static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
{
}
 
static inline bool pwm_can_sleep(struct pwm_device *pwm)
{
return false;
}
#endif
 
#endif /* __LINUX_PWM_H */
/drivers/include/linux/rbtree.h
51,7 → 51,7
 
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
 
/* 'empty' nodes are nodes that are known not to be inserted in an rbree */
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node) \
85,6 → 85,15
*rb_link = node;
}
 
static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = NULL;
 
rcu_assign_pointer(*rb_link, node);
}
 
#define rb_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
____ptr ? rb_entry(____ptr, type, member) : NULL; \
91,13 → 100,21
})
 
/**
* rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
* given type safe against removal of rb_node entry
* rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
* given type allowing the backing memory of @pos to be invalidated
*
* @pos: the 'type *' to use as a loop cursor.
* @n: another 'type *' to use as temporary storage
* @root: 'rb_root *' of the rbtree.
* @field: the name of the rb_node field within 'type'.
*
* rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
* list_for_each_entry_safe() and allows the iteration to continue independent
* of changes to @pos by the body of the loop.
*
* Note, however, that it cannot handle other modifications that re-order the
* rbtree it is iterating over. This includes calling rb_erase() on @pos, as
* rb_erase() may rebalance the tree, causing us to miss some nodes.
*/
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
/drivers/include/linux/rbtree_augmented.h
123,11 → 123,11
{
if (parent) {
if (parent->rb_left == old)
parent->rb_left = new;
WRITE_ONCE(parent->rb_left, new);
else
parent->rb_right = new;
WRITE_ONCE(parent->rb_right, new);
} else
root->rb_node = new;
WRITE_ONCE(root->rb_node, new);
}
 
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
137,7 → 137,8
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
const struct rb_augment_callbacks *augment)
{
struct rb_node *child = node->rb_right, *tmp = node->rb_left;
struct rb_node *child = node->rb_right;
struct rb_node *tmp = node->rb_left;
struct rb_node *parent, *rebalance;
unsigned long pc;
 
167,6 → 168,7
tmp = parent;
} else {
struct rb_node *successor = child, *child2;
 
tmp = child->rb_left;
if (!tmp) {
/*
180,6 → 182,7
*/
parent = successor;
child2 = successor->rb_right;
 
augment->copy(node, successor);
} else {
/*
201,19 → 204,23
successor = tmp;
tmp = tmp->rb_left;
} while (tmp);
parent->rb_left = child2 = successor->rb_right;
successor->rb_right = child;
child2 = successor->rb_right;
WRITE_ONCE(parent->rb_left, child2);
WRITE_ONCE(successor->rb_right, child);
rb_set_parent(child, successor);
 
augment->copy(node, successor);
augment->propagate(parent, successor);
}
 
successor->rb_left = tmp = node->rb_left;
tmp = node->rb_left;
WRITE_ONCE(successor->rb_left, tmp);
rb_set_parent(tmp, successor);
 
pc = node->__rb_parent_color;
tmp = __rb_parent(pc);
__rb_change_child(node, successor, tmp, root);
 
if (child2) {
successor->__rb_parent_color = pc;
rb_set_parent_color(child2, parent, RB_BLACK);
/drivers/include/linux/rculist.h
29,8 → 29,8
*/
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
ACCESS_ONCE(list->next) = list;
ACCESS_ONCE(list->prev) = list;
WRITE_ONCE(list->next, list);
WRITE_ONCE(list->prev, list);
}
 
/*
247,10 → 247,7
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_entry_rcu(ptr, type, member) \
({ \
typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
})
container_of(lockless_dereference(ptr), type, member)
 
/**
* Where are list_empty_rcu() and list_first_entry_rcu()?
288,7 → 285,7
#define list_first_or_null_rcu(ptr, type, member) \
({ \
struct list_head *__ptr = (ptr); \
struct list_head *__next = ACCESS_ONCE(__ptr->next); \
struct list_head *__next = READ_ONCE(__ptr->next); \
likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
 
524,11 → 521,11
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue_rcu(pos, member) \
for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
typeof(*(pos)), member); \
for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
typeof(*(pos)), member))
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member))
 
/**
* hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
536,11 → 533,11
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
typeof(*(pos)), member); \
for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member); \
pos; \
pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
typeof(*(pos)), member))
pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member))
 
/**
* hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
549,8 → 546,8
*/
#define hlist_for_each_entry_from_rcu(pos, member) \
for (; pos; \
pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
typeof(*(pos)), member))
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
&(pos)->member)), typeof(*(pos)), member))
 
#endif /* __KERNEL__ */
#endif
/drivers/include/linux/rcupdate.h
44,10 → 44,32
//#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
 
#include <asm/barrier.h>
 
extern int rcu_expedited; /* for sysctl */
 
#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. */
{
return false;
}
 
static inline void rcu_expedite_gp(void)
{
}
 
static inline void rcu_unexpedite_gp(void)
{
}
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_expedited(void); /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
#endif /* #else #ifdef CONFIG_TINY_RCU */
 
enum rcutorture_type {
RCU_FLAVOR,
RCU_BH_FLAVOR,
138,7 → 160,7
* more than one CPU).
*/
void call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *head));
rcu_callback_t func);
 
#else /* #ifdef CONFIG_PREEMPT_RCU */
 
169,7 → 191,7
* memory ordering guarantees.
*/
void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
rcu_callback_t func);
 
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
191,7 → 213,7
* memory ordering guarantees.
*/
void call_rcu_sched(struct rcu_head *head,
void (*func)(struct rcu_head *rcu));
rcu_callback_t func);
 
void synchronize_sched(void);
 
213,7 → 235,7
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
*/
void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void rcu_barrier_tasks(void);
 
236,11 → 258,13
 
static inline void __rcu_read_lock(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
preempt_disable();
}
 
static inline void __rcu_read_unlock(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
preempt_enable();
}
 
258,14 → 282,13
 
/* Internal to kernel */
void rcu_init(void);
void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
void rcu_idle_enter(void);
void rcu_idle_exit(void);
void rcu_irq_enter(void);
void rcu_irq_exit(void);
int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
 
#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
279,7 → 302,7
}
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */
 
#ifdef CONFIG_RCU_USER_QS
#ifdef CONFIG_NO_HZ_FULL
void rcu_user_enter(void);
void rcu_user_exit(void);
#else
287,7 → 310,7
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
#endif /* CONFIG_RCU_USER_QS */
#endif /* CONFIG_NO_HZ_FULL */
 
#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
331,12 → 354,13
extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch(t) \
do { \
if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
rcu_all_qs(); \
if (READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
#define rcu_note_voluntary_context_switch(t) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#endif /* #else #ifdef CONFIG_TASKS_RCU */
 
/**
361,10 → 385,6
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
 
typedef void call_rcu_func_t(struct rcu_head *head,
void (*func)(struct rcu_head *head));
void wait_rcu_gp(call_rcu_func_t crf);
 
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
576,17 → 596,17
 
#define __rcu_access_pointer(p, space) \
({ \
typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
rcu_dereference_sparse(p, space); \
((typeof(*p) __force __kernel *)(_________p1)); \
})
#define __rcu_dereference_check(p, c, space) \
({ \
typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
/* Dependency order vs. p above. */ \
typeof(*p) *________p1 = (typeof(*p) *__force)lockless_dereference(p); \
rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \
rcu_dereference_sparse(p, space); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
((typeof(*p) __force __kernel *)(_________p1)); \
((typeof(*p) __force __kernel *)(________p1)); \
})
#define __rcu_dereference_protected(p, c, space) \
({ \
595,21 → 615,6
((typeof(*p) __force __kernel *)(p)); \
})
 
#define __rcu_access_index(p, space) \
({ \
typeof(p) _________p1 = ACCESS_ONCE(p); \
rcu_dereference_sparse(p, space); \
(_________p1); \
})
#define __rcu_dereference_index_check(p, c) \
({ \
typeof(p) _________p1 = ACCESS_ONCE(p); \
rcu_lockdep_assert(c, \
"suspicious rcu_dereference_index_check() usage"); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
 
/**
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable
* @v: The value to statically initialize with.
617,21 → 622,6
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
 
/**
* lockless_dereference() - safely load a pointer for later dereference
* @p: The pointer to load
*
* Similar to rcu_dereference(), but for situations where the pointed-to
* object's lifetime is managed by something other than RCU. That
* "something other" might be reference counting or simple immortality.
*/
#define lockless_dereference(p) \
({ \
typeof(p) _________p1 = ACCESS_ONCE(p); \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
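 
/*
 * Illustrative sketch (hypothetical 'struct cfg', 'global_cfg' pointer whose
 * target is never freed, and use()); not part of the original header:
 *
 *	struct cfg *c = lockless_dereference(global_cfg);
 *
 *	if (c)
 *		use(c->field);
 */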
 
/**
* rcu_assign_pointer() - assign to RCU-protected pointer
* @p: pointer to assign to
* @v: value to assign (publish)
669,7 → 659,7
* @p: The pointer to read
*
* Return the value of the specified RCU-protected pointer, but omit the
* smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
* smp_read_barrier_depends() and keep the READ_ONCE(). This is useful
* when the value of this pointer is accessed, but the pointer is not
* dereferenced, for example, when testing an RCU-protected pointer against
* NULL. Although rcu_access_pointer() may also be used in cases where
719,7 → 709,7
* annotated as __rcu.
*/
#define rcu_dereference_check(p, c) \
__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)
__rcu_dereference_check((p), (c) || rcu_read_lock_held(), __rcu)
 
/**
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
729,7 → 719,7
* This is the RCU-bh counterpart to rcu_dereference_check().
*/
#define rcu_dereference_bh_check(p, c) \
__rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)
__rcu_dereference_check((p), (c) || rcu_read_lock_bh_held(), __rcu)
 
/**
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
739,7 → 729,7
* This is the RCU-sched counterpart to rcu_dereference_check().
*/
#define rcu_dereference_sched_check(p, c) \
__rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
__rcu_dereference_check((p), (c) || rcu_read_lock_sched_held(), \
__rcu)
 
#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/
754,47 → 744,12
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
 
/**
* rcu_access_index() - fetch RCU index with no dereferencing
* @p: The index to read
*
* Return the value of the specified RCU-protected index, but omit the
* smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful
* when the value of this index is accessed, but the index is not
* dereferenced, for example, when testing an RCU-protected index against
* -1. Although rcu_access_index() may also be used in cases where
* update-side locks prevent the value of the index from changing, you
* should instead use rcu_dereference_index_protected() for this use case.
*/
#define rcu_access_index(p) __rcu_access_index((p), __rcu)
 
/**
* rcu_dereference_index_check() - rcu_dereference for indices with debug checking
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Similar to rcu_dereference_check(), but omits the sparse checking.
* This allows rcu_dereference_index_check() to be used on integers,
* which can then be used as array indices. Attempting to use
* rcu_dereference_check() on an integer will give compiler warnings
* because the sparse address-space mechanism relies on dereferencing
* the RCU-protected pointer. Dereferencing integers is not something
* that even gcc will put up with.
*
* Note that this function does not implicitly check for RCU read-side
* critical sections. If this function gains lots of uses, it might
* make sense to provide versions for each flavor of RCU, but it does
* not make sense as of early 2010.
*/
#define rcu_dereference_index_check(p, c) \
__rcu_dereference_index_check((p), (c))
 
/**
* rcu_dereference_protected() - fetch RCU pointer when updates prevented
* @p: The pointer to read, prior to dereferencing
* @c: The conditions under which the dereference will take place
*
* Return the value of the specified RCU-protected pointer, but omit
* both the smp_read_barrier_depends() and the ACCESS_ONCE(). This
* both the smp_read_barrier_depends() and the READ_ONCE(). This
* is useful in cases where update-side locks prevent the value of the
* pointer from changing. Please note that this primitive does -not-
* prevent the compiler from repeating this reference or combining it
932,9 → 887,9
{
rcu_lockdep_assert(rcu_is_watching(),
"rcu_read_unlock() used illegally while idle");
rcu_lock_release(&rcu_lock_map);
__release(RCU);
__rcu_read_unlock();
rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}
 
/**
1120,13 → 1075,13
#define kfree_rcu(ptr, rcu_head) \
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
 
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
#ifdef CONFIG_TINY_RCU
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
*delta_jiffies = ULONG_MAX;
*nextevt = KTIME_MAX;
return 0;
}
#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
#endif /* #ifdef CONFIG_TINY_RCU */
 
#if defined(CONFIG_RCU_NOCB_CPU_ALL)
static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
/drivers/include/linux/rcutiny.h
92,21 → 92,53
}
 
/*
* Return the number of grace periods.
* Return the number of grace periods started.
*/
static inline long rcu_batches_completed(void)
static inline unsigned long rcu_batches_started(void)
{
return 0;
}
 
/*
* Return the number of bottom-half grace periods.
* Return the number of bottom-half grace periods started.
*/
static inline long rcu_batches_completed_bh(void)
static inline unsigned long rcu_batches_started_bh(void)
{
return 0;
}
 
/*
* Return the number of sched grace periods started.
*/
static inline unsigned long rcu_batches_started_sched(void)
{
return 0;
}
 
/*
* Return the number of grace periods completed.
*/
static inline unsigned long rcu_batches_completed(void)
{
return 0;
}
 
/*
* Return the number of bottom-half grace periods completed.
*/
static inline unsigned long rcu_batches_completed_bh(void)
{
return 0;
}
 
/*
* Return the number of sched grace periods completed.
*/
static inline unsigned long rcu_batches_completed_sched(void)
{
return 0;
}
 
static inline void rcu_force_quiescent_state(void)
{
}
127,6 → 159,22
{
}
 
static inline void rcu_idle_enter(void)
{
}
 
static inline void rcu_idle_exit(void)
{
}
 
static inline void rcu_irq_enter(void)
{
}
 
static inline void rcu_irq_exit(void)
{
}
 
static inline void exit_rcu(void)
{
}
154,7 → 202,10
return true;
}
 
 
#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
 
static inline void rcu_all_qs(void)
{
}
 
#endif /* __LINUX_RCUTINY_H */
/drivers/include/linux/scatterlist.h
2,13 → 2,38
#define _LINUX_SCATTERLIST_H
 
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mm.h>
 
#include <asm/types.h>
#include <asm/scatterlist.h>
//#include <asm/io.h>
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
unsigned long sg_magic;
#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
dma_addr_t dma_address;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
unsigned int dma_length;
#endif
};
 
/*
* These macros should be used after a dma_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries dma_map_sg
* returns, or alternatively stop on the first sg_dma_len(sg) which
* is 0.
*/
#define sg_dma_address(sg) ((sg)->dma_address)
 
#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg) ((sg)->dma_length)
#else
#define sg_dma_len(sg) ((sg)->length)
#endif
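 
/*
 * Illustrative usage sketch (hypothetical 'dev', 'sgl', 'nents' and
 * program_hw()); not part of the original header:
 *
 *	struct scatterlist *sg;
 *	int i, mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	for_each_sg(sgl, sg, mapped, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 */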
 
struct sg_table {
struct scatterlist *sgl; /* the list */
unsigned int nents; /* number of mapped entries */
18,10 → 43,9
/*
* Notes on SG table design.
*
* Architectures must provide an unsigned long page_link field in the
* scatterlist struct. We use that to place the page pointer AND encode
* information about the sg table as well. The two lower bits are reserved
* for this information.
* We use the unsigned long page_link field in the scatterlist struct to place
* the page pointer AND encode information about the sg table as well. The two
* lower bits are reserved for this information.
*
* If bit 0 is set, then the page_link contains a pointer to the next sg
* table list. Otherwise the next entry is at sg + 1.
108,14 → 132,14
* @buflen: Data length
*
**/
//static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
// unsigned int buflen)
//{
//#ifdef CONFIG_DEBUG_SG
// BUG_ON(!virt_addr_valid(buf));
//#endif
// sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
//}
static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
unsigned int buflen)
{
#ifdef CONFIG_DEBUG_SG
BUG_ON(!virt_addr_valid(buf));
#endif
sg_set_page(sg, (struct page*)((unsigned)buf&0xFFFFF000), buflen, offset_in_page(buf));
}
 
/*
* Loop over each sg element, following the pointer to a new list if necessary
136,10 → 160,6
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
struct scatterlist *sgl)
{
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
BUG();
#endif
 
/*
* offset and length are unused for chain entry. Clear them.
*/
221,10 → 241,16
//}
 
int sg_nents(struct scatterlist *sg);
int sg_nents_for_len(struct scatterlist *sg, u64 len);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
int sg_split(struct scatterlist *in, const int in_mapped_nents,
const off_t skip, const int nb_splits,
const size_t *split_sizes,
struct scatterlist **out, int *out_mapped_nents,
gfp_t gfp_mask);
 
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
239,13 → 265,16
unsigned long offset, unsigned long size,
gfp_t gfp_mask);
 
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
size_t buflen, off_t skip, bool to_buffer);
 
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
const void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
 
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
const void *buf, size_t buflen, off_t skip);
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
 
349,4 → 378,6
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
 
#define dma_unmap_sg(d, s, n, r)
 
#endif /* _LINUX_SCATTERLIST_H */
/drivers/include/linux/sched.h
2,7 → 2,47
#define _LINUX_SCHED_H
 
 
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
*
* We have two separate sets of flags: task->state
* is about runnability, while task->exit_state are
* about the task exiting. Confusing, but this way
* modifying one set can't modify the other one by
* mistake.
*/
#define TASK_RUNNING 0
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
#define __TASK_STOPPED 4
#define __TASK_TRACED 8
/* in tsk->exit_state */
#define EXIT_DEAD 16
#define EXIT_ZOMBIE 32
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_PARKED 512
#define TASK_NOLOAD 1024
#define TASK_STATE_MAX 2048
/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
 
#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
 
/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
 
/* get_task_state() */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
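 
/*
 * Illustrative sketch (not part of the original header; set_current_state()
 * and schedule() belong to the full kernel API): a task that sleeps
 * uninterruptibly but should not count towards the load average uses the
 * combined TASK_IDLE state:
 *
 *	set_current_state(TASK_IDLE);	(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
 *	schedule();
 */
 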
/* Task command name length */
#define TASK_COMM_LEN 16
 
/drivers/include/linux/seq_file.h
1,9 → 1,26
/* stub */
#ifndef _LINUX_SEQ_FILE_H
#define _LINUX_SEQ_FILE_H
 
#include <linux/types.h>
#include <linux/string.h>
#include <linux/bug.h>
#include <linux/mutex.h>
 
struct seq_file {
char *buf;
size_t size;
size_t from;
size_t count;
size_t pad_until;
loff_t index;
loff_t read_pos;
u64 version;
void *private;
};
 
int seq_puts(struct seq_file *m, const char *s);
 
__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
 
 
#endif
/drivers/include/linux/seqlock.h
35,6 → 35,7
#include <linux/spinlock.h>
//#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <asm/processor.h>
 
/*
108,7 → 109,7
unsigned ret;
 
repeat:
ret = ACCESS_ONCE(s->sequence);
ret = READ_ONCE(s->sequence);
if (unlikely(ret & 1)) {
cpu_relax();
goto repeat;
127,7 → 128,7
*/
static inline unsigned raw_read_seqcount(const seqcount_t *s)
{
unsigned ret = ACCESS_ONCE(s->sequence);
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
return ret;
}
179,7 → 180,7
*/
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
unsigned ret = ACCESS_ONCE(s->sequence);
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
return ret & ~1;
}
236,6 → 237,79
/*
* raw_write_seqcount_latch - redirect readers to even/odd copy
* @s: pointer to seqcount_t
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
* interrupt the modification -- e.g. the concurrency is strictly between CPUs
* -- you most likely do not need this.
*
* Where the traditional RCU/lockless data structures rely on atomic
* modifications to ensure queries observe either the old or the new state the
* latch allows the same for non-atomic updates. The trade-off is doubling the
* cost of storage; we have to maintain two copies of the entire data
* structure.
*
* Very simply put: we first modify one copy and then the other. This ensures
* there is always one copy in a stable state, ready to give us an answer.
*
* The basic form is a data structure like:
*
* struct latch_struct {
* seqcount_t seq;
* struct data_struct data[2];
* };
*
* Where a modification, which is assumed to be externally serialized, does the
* following:
*
* void latch_modify(struct latch_struct *latch, ...)
* {
* smp_wmb(); <- Ensure that the last data[1] update is visible
* latch->seq++;
* smp_wmb(); <- Ensure that the seqcount update is visible
*
* modify(latch->data[0], ...);
*
* smp_wmb(); <- Ensure that the data[0] update is visible
* latch->seq++;
* smp_wmb(); <- Ensure that the seqcount update is visible
*
* modify(latch->data[1], ...);
* }
*
* The query will have a form like:
*
* struct entry *latch_query(struct latch_struct *latch, ...)
* {
* struct entry *entry;
* unsigned seq, idx;
*
* do {
* seq = lockless_dereference(latch->seq);
*
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
*
* smp_rmb();
* } while (seq != latch->seq);
*
* return entry;
* }
*
* So during the modification, queries are first redirected to data[1]. Then we
* modify data[0]. When that is complete, we redirect queries back to data[0]
* and we can modify data[1].
*
* NOTE: The non-requirement for atomic modifications does _NOT_ include
* the publishing of new entries in the case where data is a dynamic
* data structure.
*
* An iteration might start in data[0] and get suspended long enough
* to miss an entire modification sequence, once it resumes it might
* observe the new entry.
*
* NOTE: When data is a dynamic data structure; one should use regular RCU
* patterns to manage the lifetimes of the objects within.
*/
static inline void raw_write_seqcount_latch(seqcount_t *s)
{
266,13 → 340,13
}
 
/**
* write_seqcount_barrier - invalidate in-progress read-side seq operations
* write_seqcount_invalidate - invalidate in-progress read-side seq operations
* @s: pointer to seqcount_t
*
* After write_seqcount_barrier, no read-side seq operations will complete
* After write_seqcount_invalidate, no read-side seq operations will complete
* successfully and see data older than this.
*/
static inline void write_seqcount_barrier(seqcount_t *s)
static inline void write_seqcount_invalidate(seqcount_t *s)
{
smp_wmb();
s->sequence+=2;
/drivers/include/linux/sfi.h
0,0 → 1,209
/* sfi.h Simple Firmware Interface */
 
/*
 
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
 
GPL LICENSE SUMMARY
 
Copyright(c) 2009 Intel Corporation. All rights reserved.
 
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
 
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution
in the file called LICENSE.GPL.
 
BSD LICENSE
 
Copyright(c) 2009 Intel Corporation. All rights reserved.
 
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
 
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
*/
 
#ifndef _LINUX_SFI_H
#define _LINUX_SFI_H
 
//#include <linux/init.h>
#include <linux/types.h>
 
/* Table signatures reserved by the SFI specification */
#define SFI_SIG_SYST "SYST"
#define SFI_SIG_FREQ "FREQ"
#define SFI_SIG_IDLE "IDLE"
#define SFI_SIG_CPUS "CPUS"
#define SFI_SIG_MTMR "MTMR"
#define SFI_SIG_MRTC "MRTC"
#define SFI_SIG_MMAP "MMAP"
#define SFI_SIG_APIC "APIC"
#define SFI_SIG_XSDT "XSDT"
#define SFI_SIG_WAKE "WAKE"
#define SFI_SIG_DEVS "DEVS"
#define SFI_SIG_GPIO "GPIO"
 
#define SFI_SIGNATURE_SIZE 4
#define SFI_OEM_ID_SIZE 6
#define SFI_OEM_TABLE_ID_SIZE 8
 
#define SFI_NAME_LEN 16
 
#define SFI_SYST_SEARCH_BEGIN 0x000E0000
#define SFI_SYST_SEARCH_END 0x000FFFFF
 
#define SFI_GET_NUM_ENTRIES(ptable, entry_type) \
((ptable->header.len - sizeof(struct sfi_table_header)) / \
(sizeof(entry_type)))
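 
/*
 * Illustrative sketch (not part of the original header): for a SYST table
 * viewed as a struct sfi_table_simple, the number of 64-bit entry pointers is
 *
 *	struct sfi_table_simple *sb = (struct sfi_table_simple *)table;
 *	int num = SFI_GET_NUM_ENTRIES(sb, u64);
 */
 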
/*
* Table structures must be byte-packed to match the SFI specification,
* as they are provided by the BIOS.
*/
struct sfi_table_header {
char sig[SFI_SIGNATURE_SIZE];
u32 len;
u8 rev;
u8 csum;
char oem_id[SFI_OEM_ID_SIZE];
char oem_table_id[SFI_OEM_TABLE_ID_SIZE];
} __packed;
 
struct sfi_table_simple {
struct sfi_table_header header;
u64 pentry[1];
} __packed;
 
/* Comply with UEFI spec 2.1 */
struct sfi_mem_entry {
u32 type;
u64 phys_start;
u64 virt_start;
u64 pages;
u64 attrib;
} __packed;
 
struct sfi_cpu_table_entry {
u32 apic_id;
} __packed;
 
struct sfi_cstate_table_entry {
u32 hint; /* MWAIT hint */
u32 latency; /* latency in ms */
} __packed;
 
struct sfi_apic_table_entry {
u64 phys_addr; /* phy base addr for APIC reg */
} __packed;
 
struct sfi_freq_table_entry {
u32 freq_mhz; /* in MHZ */
u32 latency; /* transition latency in ms */
u32 ctrl_val; /* value to write to PERF_CTL */
} __packed;
 
struct sfi_wake_table_entry {
u64 phys_addr; /* pointer to where the wake vector is located */
} __packed;
 
struct sfi_timer_table_entry {
u64 phys_addr; /* phy base addr for the timer */
u32 freq_hz; /* in HZ */
u32 irq;
} __packed;
 
struct sfi_rtc_table_entry {
u64 phys_addr; /* phy base addr for the RTC */
u32 irq;
} __packed;
 
struct sfi_device_table_entry {
u8 type; /* bus type, I2C, SPI or ...*/
#define SFI_DEV_TYPE_SPI 0
#define SFI_DEV_TYPE_I2C 1
#define SFI_DEV_TYPE_UART 2
#define SFI_DEV_TYPE_HSI 3
#define SFI_DEV_TYPE_IPC 4
 
u8 host_num; /* attached to host 0, 1...*/
u16 addr;
u8 irq;
u32 max_freq;
char name[SFI_NAME_LEN];
} __packed;
 
struct sfi_gpio_table_entry {
char controller_name[SFI_NAME_LEN];
u16 pin_no;
char pin_name[SFI_NAME_LEN];
} __packed;
 
typedef int (*sfi_table_handler) (struct sfi_table_header *table);
 
#ifdef CONFIG_SFI
extern void __init sfi_init(void);
extern int __init sfi_platform_init(void);
extern void __init sfi_init_late(void);
extern int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id,
sfi_table_handler handler);
 
extern int sfi_disabled;
static inline void disable_sfi(void)
{
sfi_disabled = 1;
}
 
#else /* !CONFIG_SFI */
 
static inline void sfi_init(void)
{
}
 
static inline void sfi_init_late(void)
{
}
 
#define sfi_disabled 0
 
static inline int sfi_table_parse(char *signature, char *oem_id,
char *oem_table_id,
sfi_table_handler handler)
{
return -1;
}
 
#endif /* !CONFIG_SFI */
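 
/*
 * Illustrative sketch of a table handler (hypothetical handle_mtmr() and
 * setup_timer_hw()); not part of the original header:
 *
 *	static int handle_mtmr(struct sfi_table_header *table)
 *	{
 *		struct sfi_table_simple *sb = (struct sfi_table_simple *)table;
 *		struct sfi_timer_table_entry *t =
 *			(struct sfi_timer_table_entry *)sb->pentry;
 *		int i, num = SFI_GET_NUM_ENTRIES(sb, struct sfi_timer_table_entry);
 *
 *		for (i = 0; i < num; i++, t++)
 *			setup_timer_hw(t->phys_addr, t->freq_hz, t->irq);
 *		return 0;
 *	}
 *
 *	sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, handle_mtmr);
 */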
 
#endif /*_LINUX_SFI_H*/
/drivers/include/linux/slab.h
18,7 → 18,7
 
/*
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
* The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
104,7 → 104,11
(unsigned long)ZERO_SIZE_PTR)
 
void __init kmem_cache_init(void);
int slab_is_available(void);
bool slab_is_available(void);
 
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
unsigned long,
void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
120,7 → 124,9
}
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
return __builtin_malloc(size);
void *ret = __builtin_malloc(size);
memset(ret, 0, size);
return ret;
}
 
/**
/drivers/include/linux/spinlock.h
119,7 → 119,7
/*
* Despite its name it doesn't necessarily have to be a full barrier.
* It should only guarantee that a STORE before the critical section
* can not be reordered with a LOAD inside this section.
* can not be reordered with LOADs and STOREs inside this section.
* spin_lock() is the one-way barrier, this LOAD can not escape out
* of the region. So the default implementation simply ensures that
* a STORE can not move into the critical section, smp_wmb() should
129,16 → 129,6
#define smp_mb__before_spinlock() smp_wmb()
#endif
 
/*
* Place this after a lock-acquisition primitive to guarantee that
* an UNLOCK+LOCK pair act as a full barrier. This guarantee applies
* if the UNLOCK and LOCK are executed by the same CPU or if the
* UNLOCK and LOCK operate on the same lock variable.
*/
#ifndef smp_mb__after_unlock_lock
#define smp_mb__after_unlock_lock() do { } while (0)
#endif
 
/**
* raw_spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
189,6 → 179,8
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock_nested(lock, subclass)
# define raw_spin_lock_bh_nested(lock, subclass) \
_raw_spin_lock_bh_nested(lock, subclass)
 
# define raw_spin_lock_nest_lock(lock, nest_lock) \
do { \
204,6 → 196,7
# define raw_spin_lock_nested(lock, subclass) \
_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
# define raw_spin_lock_bh_nested(lock, subclass) _raw_spin_lock_bh(lock)
#endif
 
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
292,7 → 285,7
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
 
static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
return &lock->rlock;
}
303,17 → 296,17
raw_spin_lock_init(&(_lock)->rlock); \
} while (0)
 
static inline void spin_lock(spinlock_t *lock)
static __always_inline void spin_lock(spinlock_t *lock)
{
raw_spin_lock(&lock->rlock);
}
 
static inline void spin_lock_bh(spinlock_t *lock)
static __always_inline void spin_lock_bh(spinlock_t *lock)
{
raw_spin_lock_bh(&lock->rlock);
}
 
static inline int spin_trylock(spinlock_t *lock)
static __always_inline int spin_trylock(spinlock_t *lock)
{
return raw_spin_trylock(&lock->rlock);
}
323,12 → 316,17
raw_spin_lock_nested(spinlock_check(lock), subclass); \
} while (0)
 
#define spin_lock_bh_nested(lock, subclass) \
do { \
raw_spin_lock_bh_nested(spinlock_check(lock), subclass);\
} while (0)
 
#define spin_lock_nest_lock(lock, nest_lock) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
} while (0)
 
static inline void spin_lock_irq(spinlock_t *lock)
static __always_inline void spin_lock_irq(spinlock_t *lock)
{
raw_spin_lock_irq(&lock->rlock);
}
343,32 → 341,32
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
 
static inline void spin_unlock(spinlock_t *lock)
static __always_inline void spin_unlock(spinlock_t *lock)
{
raw_spin_unlock(&lock->rlock);
}
 
static inline void spin_unlock_bh(spinlock_t *lock)
static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
raw_spin_unlock_bh(&lock->rlock);
}
 
static inline void spin_unlock_irq(spinlock_t *lock)
static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
raw_spin_unlock_irq(&lock->rlock);
}
 
static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
raw_spin_unlock_irqrestore(&lock->rlock, flags);
}
 
static inline int spin_trylock_bh(spinlock_t *lock)
static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
return raw_spin_trylock_bh(&lock->rlock);
}
 
static inline int spin_trylock_irq(spinlock_t *lock)
static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
return raw_spin_trylock_irq(&lock->rlock);
}
378,22 → 376,22
raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})
 
static inline void spin_unlock_wait(spinlock_t *lock)
static __always_inline void spin_unlock_wait(spinlock_t *lock)
{
raw_spin_unlock_wait(&lock->rlock);
}
 
static inline int spin_is_locked(spinlock_t *lock)
static __always_inline int spin_is_locked(spinlock_t *lock)
{
return raw_spin_is_locked(&lock->rlock);
}
 
static inline int spin_is_contended(spinlock_t *lock)
static __always_inline int spin_is_contended(spinlock_t *lock)
{
return raw_spin_is_contended(&lock->rlock);
}
 
static inline int spin_can_lock(spinlock_t *lock)
static __always_inline int spin_can_lock(spinlock_t *lock)
{
return raw_spin_can_lock(&lock->rlock);
}
/drivers/include/linux/spinlock_api_up.h
57,6 → 57,7
 
#define _raw_spin_lock(lock) __LOCK(lock)
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_spin_lock_bh_nested(lock, subclass) __LOCK(lock)
#define _raw_read_lock(lock) __LOCK(lock)
#define _raw_write_lock(lock) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
/drivers/include/linux/stddef.h
3,7 → 3,6
 
#include <uapi/linux/stddef.h>
 
 
#undef NULL
#define NULL ((void *)0)
 
18,4 → 17,14
#else
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
 
/**
* offsetofend(TYPE, MEMBER)
*
* @TYPE: The type of the structure
* @MEMBER: The member within the structure to get the end offset of
*/
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
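 
/*
 * Illustrative sketch (hypothetical struct, no padding assumed); not part of
 * the original header:
 *
 *	struct hdr { u32 magic; u16 len; };
 *
 *	offsetofend(struct hdr, magic) == 4
 *	offsetofend(struct hdr, len)   == 6
 */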
 
#endif
/drivers/include/linux/string.h
25,6 → 25,9
#ifndef __HAVE_ARCH_STRLCPY
size_t strlcpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRSCPY
ssize_t __must_check strscpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
#endif
40,9 → 43,6
#ifndef __HAVE_ARCH_STRNCMP
extern int strncmp(const char *,const char *,__kernel_size_t);
#endif
#ifndef __HAVE_ARCH_STRNICMP
#define strnicmp strncasecmp
#endif
#ifndef __HAVE_ARCH_STRCASECMP
extern int strcasecmp(const char *s1, const char *s2);
#endif
114,8 → 114,12
extern void * memchr(const void *,int,__kernel_size_t);
#endif
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *s, char old, char new);
 
extern void kfree_const(const void *x);
 
extern char *kstrdup(const char *s, gfp_t gfp);
extern const char *kstrdup_const(const char *s, gfp_t gfp);
extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
 
/drivers/include/linux/swab.h
1,284 → 1,8
#ifndef _LINUX_SWAB_H
#define _LINUX_SWAB_H
 
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/swab.h>
#include <uapi/linux/swab.h>
 
/*
* casts are necessary for constants, because we never know for sure
* how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
*/
#define ___constant_swab16(x) ((__u16)( \
(((__u16)(x) & (__u16)0x00ffU) << 8) | \
(((__u16)(x) & (__u16)0xff00U) >> 8)))
 
#define ___constant_swab32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
(((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
(((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
(((__u32)(x) & (__u32)0xff000000UL) >> 24)))
 
#define ___constant_swab64(x) ((__u64)( \
(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))
 
#define ___constant_swahw32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x0000ffffUL) << 16) | \
(((__u32)(x) & (__u32)0xffff0000UL) >> 16)))
 
#define ___constant_swahb32(x) ((__u32)( \
(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) | \
(((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))
 
/*
* Implement the following as inlines, but define the interface using
* macros to allow constant folding when possible:
* ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
*/
 
static inline __attribute_const__ __u16 __fswab16(__u16 val)
{
#ifdef __arch_swab16
return __arch_swab16(val);
#else
return ___constant_swab16(val);
#endif
}
 
static inline __attribute_const__ __u32 __fswab32(__u32 val)
{
#ifdef __arch_swab32
return __arch_swab32(val);
#else
return ___constant_swab32(val);
#endif
}
 
static inline __attribute_const__ __u64 __fswab64(__u64 val)
{
#ifdef __arch_swab64
return __arch_swab64(val);
#elif defined(__SWAB_64_THRU_32__)
__u32 h = val >> 32;
__u32 l = val & ((1ULL << 32) - 1);
return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h)));
#else
return ___constant_swab64(val);
#endif
}
 
static inline __attribute_const__ __u32 __fswahw32(__u32 val)
{
#ifdef __arch_swahw32
return __arch_swahw32(val);
#else
return ___constant_swahw32(val);
#endif
}
 
static inline __attribute_const__ __u32 __fswahb32(__u32 val)
{
#ifdef __arch_swahb32
return __arch_swahb32(val);
#else
return ___constant_swahb32(val);
#endif
}
 
/**
* __swab16 - return a byteswapped 16-bit value
* @x: value to byteswap
*/
#define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
___constant_swab16(x) : \
__fswab16(x))
 
/**
* __swab32 - return a byteswapped 32-bit value
* @x: value to byteswap
*/
#define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swab32(x) : \
__fswab32(x))
 
/**
* __swab64 - return a byteswapped 64-bit value
* @x: value to byteswap
*/
#define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
___constant_swab64(x) : \
__fswab64(x))
 
/**
* __swahw32 - return a word-swapped 32-bit value
* @x: value to wordswap
*
* __swahw32(0x12340000) is 0x00001234
*/
#define __swahw32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swahw32(x) : \
__fswahw32(x))
 
/**
* __swahb32 - return a high and low byte-swapped 32-bit value
* @x: value to byteswap
*
* __swahb32(0x12345678) is 0x34127856
*/
#define __swahb32(x) \
(__builtin_constant_p((__u32)(x)) ? \
___constant_swahb32(x) : \
__fswahb32(x))
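
A minimal sketch of the two paths taken by these wrappers: a literal operand collapses to the ___constant_swab*() form at compile time, while a runtime operand goes through __fswab*() (or an arch-provided __arch_swab*()). The function below is hypothetical and purely illustrative.

static void example_swab_usage(__u16 v16, __u32 v32)
{
	__u16 a = __swab16(0x1234);	/* constant: folds to 0x3412 at compile time */
	__u32 b = __swab32(v32);	/* runtime: calls __fswab32()/__arch_swab32() */
	__u16 c = __swab16(v16);	/* runtime 16-bit swap */

	(void)a; (void)b; (void)c;	/* values shown only for illustration */
}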
 
/**
* __swab16p - return a byteswapped 16-bit value from a pointer
* @p: pointer to a naturally-aligned 16-bit value
*/
static inline __u16 __swab16p(const __u16 *p)
{
#ifdef __arch_swab16p
return __arch_swab16p(p);
#else
return __swab16(*p);
#endif
}
 
/**
* __swab32p - return a byteswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*/
static inline __u32 __swab32p(const __u32 *p)
{
#ifdef __arch_swab32p
return __arch_swab32p(p);
#else
return __swab32(*p);
#endif
}
 
/**
* __swab64p - return a byteswapped 64-bit value from a pointer
* @p: pointer to a naturally-aligned 64-bit value
*/
static inline __u64 __swab64p(const __u64 *p)
{
#ifdef __arch_swab64p
return __arch_swab64p(p);
#else
return __swab64(*p);
#endif
}
 
/**
* __swahw32p - return a wordswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahw32() for details of wordswapping.
*/
static inline __u32 __swahw32p(const __u32 *p)
{
#ifdef __arch_swahw32p
return __arch_swahw32p(p);
#else
return __swahw32(*p);
#endif
}
 
/**
* __swahb32p - return a high and low byteswapped 32-bit value from a pointer
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahb32() for details of high/low byteswapping.
*/
static inline __u32 __swahb32p(const __u32 *p)
{
#ifdef __arch_swahb32p
return __arch_swahb32p(p);
#else
return __swahb32(*p);
#endif
}
 
/**
* __swab16s - byteswap a 16-bit value in-place
* @p: pointer to a naturally-aligned 16-bit value
*/
static inline void __swab16s(__u16 *p)
{
#ifdef __arch_swab16s
__arch_swab16s(p);
#else
*p = __swab16p(p);
#endif
}
/**
* __swab32s - byteswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*/
static inline void __swab32s(__u32 *p)
{
#ifdef __arch_swab32s
__arch_swab32s(p);
#else
*p = __swab32p(p);
#endif
}
 
/**
* __swab64s - byteswap a 64-bit value in-place
* @p: pointer to a naturally-aligned 64-bit value
*/
static inline void __swab64s(__u64 *p)
{
#ifdef __arch_swab64s
__arch_swab64s(p);
#else
*p = __swab64p(p);
#endif
}
 
/**
* __swahw32s - wordswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahw32() for details of wordswapping
*/
static inline void __swahw32s(__u32 *p)
{
#ifdef __arch_swahw32s
__arch_swahw32s(p);
#else
*p = __swahw32p(p);
#endif
}
 
/**
* __swahb32s - high and low byteswap a 32-bit value in-place
* @p: pointer to a naturally-aligned 32-bit value
*
* See __swahb32() for details of high and low byte swapping
*/
static inline void __swahb32s(__u32 *p)
{
#ifdef __arch_swahb32s
__arch_swahb32s(p);
#else
*p = __swahb32p(p);
#endif
}
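
A short hedged sketch of the in-place variants, e.g. fixing up a buffer of foreign-endian 32-bit words after reading it from disk (hypothetical helper):

static void example_swab_buffer(__u32 *buf, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		__swab32s(&buf[i]);	/* buf[i] is byte-reversed in place */
}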
 
#ifdef __KERNEL__
# define swab16 __swab16
# define swab32 __swab32
# define swab64 __swab64
294,6 → 18,4
# define swab64s __swab64s
# define swahw32s __swahw32s
# define swahb32s __swahb32s
#endif /* __KERNEL__ */
 
#endif /* _LINUX_SWAB_H */
/drivers/include/linux/time.h
110,6 → 110,19
return true;
}
 
static inline bool timeval_valid(const struct timeval *tv)
{
/* Dates before 1970 are bogus */
if (tv->tv_sec < 0)
return false;
 
/* Can't have more microseconds than a second */
if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
return false;
 
return true;
}
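
A minimal sketch of using timeval_valid() to reject malformed input before converting it; the function name and -EINVAL convention are assumptions for illustration.

static int example_set_timeout(const struct timeval *tv, struct timespec *out)
{
	if (!timeval_valid(tv))
		return -EINVAL;		/* negative secs or usec >= USEC_PER_SEC */

	out->tv_sec  = tv->tv_sec;
	out->tv_nsec = tv->tv_usec * NSEC_PER_USEC;
	return 0;
}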
 
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
 
#define CURRENT_TIME (current_kernel_time())
/drivers/include/linux/time64.h
2,6 → 2,7
#define _LINUX_TIME64_H
 
#include <uapi/linux/time.h>
#include <linux/math64.h>
 
typedef __s64 time64_t;
 
11,11 → 12,18
*/
#if __BITS_PER_LONG == 64
# define timespec64 timespec
# define itimerspec64 itimerspec
#else
struct timespec64 {
time64_t tv_sec; /* seconds */
long tv_nsec; /* nanoseconds */
};
 
struct itimerspec64 {
struct timespec64 it_interval;
struct timespec64 it_value;
};
 
#endif
 
/* Parameters used to convert the timespec values: */
28,6 → 36,7
#define FSEC_PER_SEC 1000000000000000LL
 
/* Located here for timespec[64]_valid_strict */
#define TIME64_MAX ((s64)~((u64)1 << 63))
#define KTIME_MAX ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
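
As a sanity sketch (hypothetical, relying on BUILD_BUG_ON from linux/bug.h): ~((u64)1 << 63) clears only the sign bit, so TIME64_MAX and KTIME_MAX both evaluate to the largest signed 64-bit value.

static inline void example_time64_max_check(void)
{
	BUILD_BUG_ON(TIME64_MAX != 9223372036854775807LL);	/* 2^63 - 1 */
	BUILD_BUG_ON(KTIME_SEC_MAX != TIME64_MAX / NSEC_PER_SEC);
}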
 
43,6 → 52,16
return ts;
}
 
static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
{
return *its64;
}
 
static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
{
return *its;
}
 
# define timespec64_equal timespec_equal
# define timespec64_compare timespec_compare
# define set_normalized_timespec64 set_normalized_timespec
75,6 → 94,24
return ret;
}
 
static inline struct itimerspec itimerspec64_to_itimerspec(struct itimerspec64 *its64)
{
struct itimerspec ret;
 
ret.it_interval = timespec64_to_timespec(its64->it_interval);
ret.it_value = timespec64_to_timespec(its64->it_value);
return ret;
}
 
static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
{
struct itimerspec64 ret;
 
ret.it_interval = timespec_to_timespec64(its->it_interval);
ret.it_value = timespec_to_timespec64(its->it_value);
return ret;
}
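
A hedged sketch of round-tripping an itimerspec through the 64-bit type: on 64-bit kernels both helpers are effectively no-ops, on 32-bit kernels the members are converted one by one. The function is hypothetical.

static void example_round_trip(struct itimerspec *its)
{
	struct itimerspec64 wide = itimerspec_to_itimerspec64(its);

	/* ... operate on wide.it_value / wide.it_interval with 64-bit secs ... */

	*its = itimerspec64_to_itimerspec(&wide);
}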
 
static inline int timespec64_equal(const struct timespec64 *a,
const struct timespec64 *b)
{
/drivers/include/linux/types.h
135,26 → 135,25
#endif
 
/*
* The type of an index into the pagecache. Use a #define so asm/types.h
* can override it.
* The type of an index into the pagecache.
*/
#ifndef pgoff_t
#define pgoff_t unsigned long
#endif
 
/* A dma_addr_t can hold any valid DMA or bus address for the platform */
/*
* A dma_addr_t can hold any valid DMA address, i.e., any address returned
* by the DMA API.
*
* If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
* bits wide. Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
* but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
* so they don't care about the size of the actual bus addresses.
*/
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
#endif /* dma_addr_t */
#endif
 
#ifdef __CHECKER__
#define __bitwise__ __attribute__((bitwise))
#else
#define __bitwise__
#endif
#ifdef __CHECK_ENDIAN__
#define __bitwise __bitwise__
#else
#define __bitwise
#endif
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
typedef unsigned __bitwise__ oom_flags_t;
206,12 → 205,32
* struct callback_head - callback structure for use with RCU and task_work
* @next: next update requests in a list
* @func: actual update function to call after the grace period.
*
* The struct is aligned to the size of a pointer. On most architectures this
* happens naturally due to ABI requirements, but some architectures (like
* CRIS) have a weird ABI and need the alignment requested explicitly.
*
* The alignment is required to guarantee that bits 0 and 1 of @next will be
* clear under normal conditions -- as long as we use call_rcu(),
* call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue the callback.
*
* This guarantee is important for a few reasons:
* - future call_rcu_lazy() will make use of lower bits in the pointer;
* - the structure shares storage space in struct page with @compound_head,
* which encodes PageTail() in bit 0. The guarantee is needed to avoid
* false-positive PageTail().
*/
struct callback_head {
struct callback_head *next;
void (*func)(struct callback_head *head);
};
} __attribute__((aligned(sizeof(void *))));
#define rcu_head callback_head
 
typedef void (*rcu_callback_t)(struct rcu_head *head);
typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
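
A hedged sketch of how callback_head/rcu_head is used in the kernel proper: the structure is embedded in an object and its func pointer is invoked after a grace period; the pointer alignment above is what lets RCU borrow bits 0/1 of ->next. The example_* names are hypothetical, and the sketch assumes call_rcu(), container_of() and kfree() are available.

struct example_node {
	int value;
	struct rcu_head rcu;
};

static void example_free_rcu(struct rcu_head *head)
{
	struct example_node *node = container_of(head, struct example_node, rcu);

	kfree(node);
}

static void example_release(struct example_node *node)
{
	call_rcu(&node->rcu, example_free_rcu);	/* queues node->rcu.func */
}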
 
/* clocksource cycle base type */
typedef u64 cycle_t;
 
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
/drivers/include/linux/vgaarb.h
65,8 → 65,13
* out of the arbitration process (and it is then safe to take
* interrupts at any time).
*/
#if defined(CONFIG_VGA_ARB)
extern void vga_set_legacy_decoding(struct pci_dev *pdev,
unsigned int decodes);
#else
static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
unsigned int decodes) { };
#endif
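
A minimal sketch of the intended call site: a driver that has turned off legacy VGA decoding on its device tells the arbiter so the card drops out of arbitration. The wrapper function is hypothetical; VGA_RSRC_NONE is the "decodes nothing" mask from this header.

static void example_disable_legacy(struct pci_dev *pdev)
{
	/* The card no longer decodes legacy VGA I/O or memory ranges. */
	vga_set_legacy_decoding(pdev, VGA_RSRC_NONE);
}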
 
/**
* vga_get - acquire & locks VGA resources