/drivers/include/linux/uapi/drm/vmwgfx_drm.h |
---|
File deleted |
/drivers/include/linux/uapi/drm/drm.h |
---|
File deleted |
/drivers/include/linux/uapi/drm/drm_fourcc.h |
---|
File deleted |
/drivers/include/linux/uapi/drm/drm_mode.h |
---|
File deleted |
/drivers/include/linux/uapi/drm/i915_drm.h |
---|
File deleted |
/drivers/include/linux/uapi/drm/radeon_drm.h |
---|
File deleted |
/drivers/include/linux/asm/string_32.h |
---|
File deleted |
/drivers/include/linux/asm/atomic.h |
---|
File deleted |
/drivers/include/linux/asm/alternative.h |
---|
File deleted |
/drivers/include/linux/asm/atomic_32.h |
---|
File deleted |
/drivers/include/linux/asm/asm.h |
---|
File deleted |
/drivers/include/linux/asm/posix_types.h |
---|
File deleted |
/drivers/include/linux/asm/bitsperlong.h |
---|
File deleted |
/drivers/include/linux/asm/cmpxchg.h |
---|
File deleted |
/drivers/include/linux/asm/posix_types_32.h |
---|
File deleted |
/drivers/include/linux/asm/required-features.h |
---|
File deleted |
/drivers/include/linux/asm/swab.h |
---|
File deleted |
/drivers/include/linux/asm/div64.h |
---|
File deleted |
/drivers/include/linux/asm/cmpxchg_32.h |
---|
File deleted |
/drivers/include/linux/asm/byteorder.h |
---|
File deleted |
/drivers/include/linux/asm/bitops.h |
---|
File deleted |
/drivers/include/linux/asm/scatterlist.h |
---|
File deleted |
/drivers/include/linux/asm/cpufeature.h |
---|
File deleted |
/drivers/include/linux/asm/types.h |
---|
File deleted |
/drivers/include/linux/asm/string.h |
---|
File deleted |
/drivers/include/linux/asm/spinlock_types.h |
---|
File deleted |
/drivers/include/linux/asm/unaligned.h |
---|
File deleted |
/drivers/include/linux/asm |
---|
Property changes: |
Deleted: svn:ignore |
-*.o |
-*.obj |
/drivers/include/linux/asm-generic/types.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitsperlong.h |
---|
File deleted |
/drivers/include/linux/asm-generic/int-ll64.h |
---|
File deleted |
/drivers/include/linux/asm-generic/atomic-long.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops/sched.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops/fls64.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops/hweight.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops/le.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops/ext2-non-atomic.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops/minix.h |
---|
File deleted |
/drivers/include/linux/asm-generic/bitops |
---|
Property changes: |
Deleted: svn:ignore |
-*.o |
-*.obj |
/drivers/include/linux/asm-generic |
---|
Property changes: |
Deleted: svn:ignore |
-*.o |
-*.obj |
/drivers/include/linux/agp_backend.h |
---|
0,0 → 1,109 |
/* |
* AGPGART backend specific includes. Not for userspace consumption. |
* |
* Copyright (C) 2004 Silicon Graphics, Inc. |
* Copyright (C) 2002-2003 Dave Jones |
* Copyright (C) 1999 Jeff Hartmann |
* Copyright (C) 1999 Precision Insight, Inc. |
* Copyright (C) 1999 Xi Graphics, Inc. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included |
* in all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE |
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
* |
*/ |
#ifndef _AGP_BACKEND_H |
#define _AGP_BACKEND_H 1 |
#include <linux/list.h> |
enum chipset_type { |
NOT_SUPPORTED, |
SUPPORTED, |
}; |
struct agp_version { |
u16 major; |
u16 minor; |
}; |
struct agp_kern_info { |
struct agp_version version; |
struct pci_dev *device; |
enum chipset_type chipset; |
unsigned long mode; |
unsigned long aper_base; |
size_t aper_size; |
int max_memory; /* In pages */ |
int current_memory; |
bool cant_use_aperture; |
unsigned long page_mask; |
const struct vm_operations_struct *vm_ops; |
}; |
/* |
* The agp_memory structure has information about the block of agp memory |
* allocated. A caller may manipulate the next and prev pointers to link |
* each allocated item into a list. These pointers are ignored by the backend. |
* Everything else should never be written to, but the caller may read any of |
* the items to determine the status of this block of agp memory. |
*/ |
struct agp_bridge_data; |
struct agp_memory { |
struct agp_memory *next; |
struct agp_memory *prev; |
struct agp_bridge_data *bridge; |
struct page **pages; |
size_t page_count; |
int key; |
int num_scratch_pages; |
off_t pg_start; |
u32 type; |
u32 physical; |
bool is_bound; |
bool is_flushed; |
/* list of agp_memory mapped to the aperture */ |
struct list_head mapped_list; |
/* DMA-mapped addresses */ |
struct scatterlist *sg_list; |
int num_sg; |
}; |
#define AGP_NORMAL_MEMORY 0 |
#define AGP_USER_TYPES (1 << 16) |
#define AGP_USER_MEMORY (AGP_USER_TYPES) |
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) |
extern struct agp_bridge_data *agp_bridge; |
extern struct list_head agp_bridges; |
extern struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *); |
extern void agp_free_memory(struct agp_memory *); |
extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, u32); |
extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); |
extern int agp_bind_memory(struct agp_memory *, off_t); |
extern int agp_unbind_memory(struct agp_memory *); |
extern void agp_enable(struct agp_bridge_data *, u32); |
extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); |
extern void agp_backend_release(struct agp_bridge_data *); |
#endif /* _AGP_BACKEND_H */ |
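A minimal usage sketch of the API declared above: allocate a block, bind it into the aperture, and back out on failure. The helper name, page count and offset are illustrative, not part of this changeset. |
#include <linux/agp_backend.h> |
/* Hypothetical helper: allocate 'pages' pages of normal AGP memory and |
 * map them into the aperture at page offset 'pg_start'. */ |
static struct agp_memory *example_agp_alloc(struct agp_bridge_data *bridge, |
					    size_t pages, off_t pg_start) |
{ |
	struct agp_memory *mem; |
	mem = agp_allocate_memory(bridge, pages, AGP_NORMAL_MEMORY); |
	if (!mem) |
		return NULL; |
	if (agp_bind_memory(mem, pg_start) != 0) { |
		agp_free_memory(mem); |
		return NULL; |
	} |
	return mem; |
} |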
/drivers/include/linux/async.h |
---|
0,0 → 1,50 |
/* |
* async.h: Asynchronous function calls for boot performance |
* |
* (C) Copyright 2009 Intel Corporation |
* Author: Arjan van de Ven <arjan@linux.intel.com> |
* |
* This program is free software; you can redistribute it and/or |
* modify it under the terms of the GNU General Public License |
* as published by the Free Software Foundation; version 2 |
* of the License. |
*/ |
#ifndef __ASYNC_H__ |
#define __ASYNC_H__ |
#include <linux/types.h> |
#include <linux/list.h> |
typedef u64 async_cookie_t; |
typedef void (*async_func_t) (void *data, async_cookie_t cookie); |
struct async_domain { |
struct list_head pending; |
unsigned registered:1; |
}; |
/* |
* domain participates in global async_synchronize_full |
*/ |
#define ASYNC_DOMAIN(_name) \ |
struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \ |
.registered = 1 } |
/* |
* domain is free to go out of scope as soon as all pending work is |
* complete, this domain does not participate in async_synchronize_full |
*/ |
#define ASYNC_DOMAIN_EXCLUSIVE(_name) \ |
struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \ |
.registered = 0 } |
extern async_cookie_t async_schedule(async_func_t func, void *data); |
extern async_cookie_t async_schedule_domain(async_func_t func, void *data, |
struct async_domain *domain); |
void async_unregister_domain(struct async_domain *domain); |
extern void async_synchronize_full(void); |
extern void async_synchronize_full_domain(struct async_domain *domain); |
extern void async_synchronize_cookie(async_cookie_t cookie); |
extern void async_synchronize_cookie_domain(async_cookie_t cookie, |
struct async_domain *domain); |
extern bool current_is_async(void); |
#endif |
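For context, a short sketch (assumed, not from this changeset) of the domain API above: work queued with async_schedule_domain() can be flushed on its own, without waiting on the global pending list. |
#include <linux/async.h> |
static ASYNC_DOMAIN(example_domain); |
/* Hypothetical worker: runs asynchronously, in parallel with its peers. */ |
static void example_probe_one(void *data, async_cookie_t cookie) |
{ |
	/* ... slow, independent initialization of one device ... */ |
} |
static void example_probe_all(void **devices, int n) |
{ |
	int i; |
	for (i = 0; i < n; i++) |
		async_schedule_domain(example_probe_one, devices[i], |
				      &example_domain); |
	/* Wait only for the work queued in this domain. */ |
	async_synchronize_full_domain(&example_domain); |
} |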
/drivers/include/linux/atomic.h |
---|
0,0 → 1,131 |
/* Atomic operations usable in machine independent code */ |
#ifndef _LINUX_ATOMIC_H |
#define _LINUX_ATOMIC_H |
#include <asm/atomic.h> |
/** |
* atomic_add_unless - add unless the number is already a given value |
* @v: pointer of type atomic_t |
* @a: the amount to add to v... |
* @u: ...unless v is equal to u. |
* |
* Atomically adds @a to @v, so long as @v was not already @u. |
* Returns non-zero if @v was not @u, and zero otherwise. |
*/ |
static inline int atomic_add_unless(atomic_t *v, int a, int u) |
{ |
return __atomic_add_unless(v, a, u) != u; |
} |
/** |
* atomic_inc_not_zero - increment unless the number is zero |
* @v: pointer of type atomic_t |
* |
* Atomically increments @v by 1, so long as @v is non-zero. |
* Returns non-zero if @v was non-zero, and zero otherwise. |
*/ |
#ifndef atomic_inc_not_zero |
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) |
#endif |
/** |
* atomic_inc_not_zero_hint - increment if not null |
* @v: pointer of type atomic_t |
* @hint: probable value of the atomic before the increment |
* |
* This version of atomic_inc_not_zero() gives a hint of the probable |
* value of the atomic. This helps the processor avoid reading the memory |
* before doing the atomic read/modify/write cycle, lowering the |
* number of bus transactions on some arches. |
* |
* Returns: 0 if increment was not done, 1 otherwise. |
*/ |
#ifndef atomic_inc_not_zero_hint |
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint) |
{ |
int val, c = hint; |
/* sanity test, should be removed by compiler if hint is a constant */ |
if (!hint) |
return atomic_inc_not_zero(v); |
do { |
val = atomic_cmpxchg(v, c, c + 1); |
if (val == c) |
return 1; |
c = val; |
} while (c); |
return 0; |
} |
#endif |
#ifndef atomic_inc_unless_negative |
static inline int atomic_inc_unless_negative(atomic_t *p) |
{ |
int v, v1; |
for (v = 0; v >= 0; v = v1) { |
v1 = atomic_cmpxchg(p, v, v + 1); |
if (likely(v1 == v)) |
return 1; |
} |
return 0; |
} |
#endif |
#ifndef atomic_dec_unless_positive |
static inline int atomic_dec_unless_positive(atomic_t *p) |
{ |
int v, v1; |
for (v = 0; v <= 0; v = v1) { |
v1 = atomic_cmpxchg(p, v, v - 1); |
if (likely(v1 == v)) |
return 1; |
} |
return 0; |
} |
#endif |
/* |
* atomic_dec_if_positive - decrement by 1 if old value positive |
* @v: pointer of type atomic_t |
* |
* The function returns the old value of *v minus 1, even if |
* the atomic variable, v, was not decremented. |
*/ |
#ifndef atomic_dec_if_positive |
static inline int atomic_dec_if_positive(atomic_t *v) |
{ |
int c, old, dec; |
c = atomic_read(v); |
for (;;) { |
dec = c - 1; |
if (unlikely(dec < 0)) |
break; |
old = atomic_cmpxchg((v), c, dec); |
if (likely(old == c)) |
break; |
c = old; |
} |
return dec; |
} |
#endif |
#ifndef CONFIG_ARCH_HAS_ATOMIC_OR |
static inline void atomic_or(int i, atomic_t *v) |
{ |
int old; |
int new; |
do { |
old = atomic_read(v); |
new = old | i; |
} while (atomic_cmpxchg(v, old, new) != old); |
} |
#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */ |
#include <asm-generic/atomic-long.h> |
#ifdef CONFIG_GENERIC_ATOMIC64 |
#include <asm-generic/atomic64.h> |
#endif |
#endif /* _LINUX_ATOMIC_H */ |
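A sketch of the lookup-side refcounting pattern these helpers serve (the object type and helper are invented): a reference is taken only while the count is still non-zero, so a dying object cannot be resurrected. |
#include <linux/atomic.h> |
#include <linux/types.h> |
struct example_obj { |
	atomic_t refcount; |
}; |
/* Take a reference only if the object is still live (refcount != 0). */ |
static bool example_obj_tryget(struct example_obj *obj) |
{ |
	return atomic_inc_not_zero(&obj->refcount) != 0; |
} |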
/drivers/include/linux/bitmap.h |
---|
45,6 → 45,7 |
* bitmap_set(dst, pos, nbits) Set specified bit area |
* bitmap_clear(dst, pos, nbits) Clear specified bit area |
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area |
* bitmap_find_next_zero_area_off(buf, len, pos, n, mask, off) as above, with an alignment offset |
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n |
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n |
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) |
60,6 → 61,7 |
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region |
* bitmap_release_region(bitmap, pos, order) Free specified bit region |
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region |
* bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex |
*/ |
/* |
114,12 → 116,37 |
extern void bitmap_set(unsigned long *map, unsigned int start, int len); |
extern void bitmap_clear(unsigned long *map, unsigned int start, int len); |
extern unsigned long bitmap_find_next_zero_area(unsigned long *map, |
unsigned long size, |
unsigned long start, |
unsigned int nr, |
unsigned long align_mask); |
extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, |
unsigned long size, |
unsigned long start, |
unsigned int nr, |
unsigned long align_mask, |
unsigned long align_offset); |
/** |
* bitmap_find_next_zero_area - find a contiguous aligned zero area |
* @map: The address to base the search on |
* @size: The bitmap size in bits |
* @start: The bitnumber to start searching at |
* @nr: The number of zeroed bits we're looking for |
* @align_mask: Alignment mask for zero area |
* |
* The @align_mask should be one less than a power of 2; the effect is that |
* the bit offset of every zero area this function finds is a multiple of that |
* power of 2. An @align_mask of 0 means no alignment is required. |
*/ |
static inline unsigned long |
bitmap_find_next_zero_area(unsigned long *map, |
unsigned long size, |
unsigned long start, |
unsigned int nr, |
unsigned long align_mask) |
{ |
return bitmap_find_next_zero_area_off(map, size, start, nr, |
align_mask, 0); |
} |
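As an aside, a small allocator sketch built on the wrapper above (pool size and helper names are hypothetical): find an aligned run of free bits, then mark it used with bitmap_set(). |
#include <linux/bitmap.h> |
#define EXAMPLE_POOL_BITS 256 |
static DECLARE_BITMAP(example_pool, EXAMPLE_POOL_BITS); |
/* Claim 'nr' contiguous free slots aligned to 'align' (a power of 2); |
 * returns the first slot index, or -1 if no suitable run exists. */ |
static long example_pool_alloc(unsigned int nr, unsigned long align) |
{ |
	unsigned long start; |
	start = bitmap_find_next_zero_area(example_pool, EXAMPLE_POOL_BITS, |
					   0, nr, align - 1); |
	if (start >= EXAMPLE_POOL_BITS) |
		return -1; |
	bitmap_set(example_pool, start, nr); |
	return start; |
} |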
extern int bitmap_scnprintf(char *buf, unsigned int len, |
const unsigned long *src, int nbits); |
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, |
145,6 → 172,8 |
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); |
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); |
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); |
extern int bitmap_print_to_pagebuf(bool list, char *buf, |
const unsigned long *maskp, int nmaskbits); |
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) |
#define BITMAP_LAST_WORD_MASK(nbits) \ |
/drivers/include/linux/bitops.h |
---|
18,9 → 18,12 |
* position @h. For example |
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. |
*/ |
#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) |
#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) |
#define GENMASK(h, l) \ |
(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) |
#define GENMASK_ULL(h, l) \ |
(((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) |
extern unsigned int __sw_hweight8(unsigned int w); |
extern unsigned int __sw_hweight16(unsigned int w); |
extern unsigned int __sw_hweight32(unsigned int w); |
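The rewrite above matters for full-width masks: the old shift-based form made GENMASK(31, 0) shift a 32-bit constant by the whole word width, which is undefined behavior, while the new form stays defined for any valid h. A typical field extraction, with an invented register layout: |
#include <linux/bitops.h> |
/* Extract bits [15:8] of a register value; the layout is illustrative. */ |
static inline unsigned long example_get_field(unsigned long reg) |
{ |
	return (reg & GENMASK(15, 8)) >> 8; /* GENMASK(15, 8) == 0xff00 */ |
} |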
/drivers/include/linux/bug.h |
---|
1,15 → 1,8 |
#ifndef _ASM_GENERIC_BUG_H |
#define _ASM_GENERIC_BUG_H |
//extern __printf(3, 4) |
//void warn_slowpath_fmt(const char *file, const int line, |
// const char *fmt, ...); |
//extern __printf(4, 5) |
//void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint, |
// const char *fmt, ...); |
#include <linux/compiler.h> |
//extern void warn_slowpath_null(const char *file, const int line); |
#define __WARN() printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__) |
//#define __WARN_printf(arg...) printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__) |
#define __WARN_printf(arg...) do { printf(arg); __WARN(); } while (0) |
61,18 → 54,66 |
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ |
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) |
/* Force a compilation error if condition is true, but also produce a |
result (of value 0 and type size_t), so the expression can be used |
e.g. in a structure initializer (or wherever else comma expressions |
aren't permitted). */ |
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) |
#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); })) |
#define printk_once(fmt, ...) \ |
({ \ |
static bool __print_once; \ |
\ |
if (!__print_once) { \ |
__print_once = true; \ |
printk(fmt, ##__VA_ARGS__); \ |
} \ |
}) |
/* |
* BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the |
* expression but avoids the generation of any code, even if that expression |
* has side-effects. |
*/ |
#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e)))) |
/** |
* BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied |
* error message. |
* @condition: the condition which the compiler should know is false. |
* |
* See BUILD_BUG_ON for description. |
*/ |
#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) |
/** |
* BUILD_BUG_ON - break compile if a condition is true. |
* @condition: the condition which the compiler should know is false. |
* |
* If you have some code which relies on certain constants being equal, or |
* some other compile-time-evaluated condition, you should use BUILD_BUG_ON to |
* detect if someone changes it. |
* |
* The implementation uses gcc's reluctance to create a negative array, but gcc |
* (as of 4.4) only emits that error for obvious cases (e.g. not arguments to |
* inline functions). Luckily, in 4.3 they added the "error" function |
* attribute just for this type of case. Thus, we use a negative sized array |
* (should always create an error on gcc versions older than 4.4) and then call |
* an undefined function with the error attribute (should always create an |
* error on gcc 4.3 and later). If for some reason neither creates a |
* compile-time error, we'll still have a link-time error, which is harder to |
* track down. |
*/ |
#ifndef __OPTIMIZE__ |
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) |
#else |
#define BUILD_BUG_ON(condition) \ |
BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) |
#endif |
/** |
* BUILD_BUG - break compile if used. |
* |
* If you have some code that you expect the compiler to eliminate at |
* build time, you should use BUILD_BUG to detect if it is |
* unexpectedly used. |
*/ |
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") |
#define pr_warn_once(fmt, ...) \ |
printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) |
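A typical compile-time check using the macros above (the header layout is invented): the build fails if the structure's size ever drifts from its assumed on-wire contract. |
#include <linux/bug.h> |
#include <linux/types.h> |
struct example_hdr { |
	u8  type; |
	u8  flags; |
	u16 len; |
}; |
static void example_hdr_checks(void) |
{ |
	/* Both checks compile away to nothing when they hold. */ |
	BUILD_BUG_ON(sizeof(struct example_hdr) != 4); |
	BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct example_hdr)); |
} |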
/drivers/include/linux/cache.h |
---|
0,0 → 1,67 |
#ifndef __LINUX_CACHE_H |
#define __LINUX_CACHE_H |
#include <uapi/linux/kernel.h> |
#include <asm/cache.h> |
#ifndef L1_CACHE_ALIGN |
#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES) |
#endif |
#ifndef SMP_CACHE_BYTES |
#define SMP_CACHE_BYTES L1_CACHE_BYTES |
#endif |
#ifndef __read_mostly |
#define __read_mostly |
#endif |
#ifndef ____cacheline_aligned |
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES))) |
#endif |
#ifndef ____cacheline_aligned_in_smp |
#ifdef CONFIG_SMP |
#define ____cacheline_aligned_in_smp ____cacheline_aligned |
#else |
#define ____cacheline_aligned_in_smp |
#endif /* CONFIG_SMP */ |
#endif |
#ifndef __cacheline_aligned |
#define __cacheline_aligned \ |
__attribute__((__aligned__(SMP_CACHE_BYTES), \ |
__section__(".data..cacheline_aligned"))) |
#endif /* __cacheline_aligned */ |
#ifndef __cacheline_aligned_in_smp |
#ifdef CONFIG_SMP |
#define __cacheline_aligned_in_smp __cacheline_aligned |
#else |
#define __cacheline_aligned_in_smp |
#endif /* CONFIG_SMP */ |
#endif |
/* |
* The maximum alignment needed for some critical structures |
* These could be inter-node cacheline sizes/L3 cacheline |
* size etc. Define this in asm/cache.h for your arch |
*/ |
#ifndef INTERNODE_CACHE_SHIFT |
#define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT |
#endif |
#if !defined(____cacheline_internodealigned_in_smp) |
#if defined(CONFIG_SMP) |
#define ____cacheline_internodealigned_in_smp \ |
__attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT)))) |
#else |
#define ____cacheline_internodealigned_in_smp |
#endif |
#endif |
#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE |
#define cache_line_size() L1_CACHE_BYTES |
#endif |
#endif /* __LINUX_CACHE_H */ |
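A sketch of the usual application (the structure is invented): pad a frequently written field onto its own cache line so it does not false-share with read-mostly neighbours. |
#include <linux/cache.h> |
struct example_dev { |
	void *regs; /* read-mostly */ |
	/* written on every interrupt; keep it off the line above */ |
	unsigned long irq_count ____cacheline_aligned_in_smp; |
}; |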
/drivers/include/linux/compiler-gcc4.h |
---|
71,7 → 71,6 |
* http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 |
* |
* Work it around via a compiler barrier quirk suggested by Jakub Jelinek. |
* Fixed in GCC 4.8.2 and later versions. |
* |
* (asm goto is automatically volatile - the naming reflects this.) |
*/ |
/drivers/include/linux/compiler.h |
---|
186,6 → 186,80 |
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) |
#endif |
#include <uapi/linux/types.h> |
static __always_inline void data_access_exceeds_word_size(void) |
#ifdef __compiletime_warning |
__compiletime_warning("data access exceeds word size and won't be atomic") |
#endif |
; |
static __always_inline void data_access_exceeds_word_size(void) |
{ |
} |
static __always_inline void __read_once_size(volatile void *p, void *res, int size) |
{ |
switch (size) { |
case 1: *(__u8 *)res = *(volatile __u8 *)p; break; |
case 2: *(__u16 *)res = *(volatile __u16 *)p; break; |
case 4: *(__u32 *)res = *(volatile __u32 *)p; break; |
#ifdef CONFIG_64BIT |
case 8: *(__u64 *)res = *(volatile __u64 *)p; break; |
#endif |
default: |
barrier(); |
__builtin_memcpy((void *)res, (const void *)p, size); |
data_access_exceeds_word_size(); |
barrier(); |
} |
} |
static __always_inline void __assign_once_size(volatile void *p, void *res, int size) |
{ |
switch (size) { |
case 1: *(volatile __u8 *)p = *(__u8 *)res; break; |
case 2: *(volatile __u16 *)p = *(__u16 *)res; break; |
case 4: *(volatile __u32 *)p = *(__u32 *)res; break; |
#ifdef CONFIG_64BIT |
case 8: *(volatile __u64 *)p = *(__u64 *)res; break; |
#endif |
default: |
barrier(); |
__builtin_memcpy((void *)p, (const void *)res, size); |
data_access_exceeds_word_size(); |
barrier(); |
} |
} |
/* |
* Prevent the compiler from merging or refetching reads or writes. The |
* compiler is also forbidden from reordering successive instances of |
* READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the |
* compiler is aware of some particular ordering. One way to make the |
* compiler aware of ordering is to put the two invocations of READ_ONCE, |
* ASSIGN_ONCE or ACCESS_ONCE() in different C statements. |
* |
* In contrast to ACCESS_ONCE these two macros will also work on aggregate |
* data types like structs or unions. If the size of the accessed data |
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits) |
* READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a |
* compile-time warning. |
* |
* Their two major use cases are: (1) Mediating communication between |
* process-level code and irq/NMI handlers, all running on the same CPU, |
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise |
* mutilate accesses that either do not require ordering or that interact |
* with an explicit memory barrier or atomic instruction that provides the |
* required ordering. |
*/ |
#define READ_ONCE(x) \ |
({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) |
#define ASSIGN_ONCE(val, x) \ |
({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; }) |
#endif /* __KERNEL__ */ |
#endif /* __ASSEMBLY__ */ |
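A minimal sketch of the intended use (the flag and loop are invented): without READ_ONCE() the compiler may hoist the load out of the loop and spin forever on a stale value. |
#include <linux/compiler.h> |
static int example_flag; /* set to 1 from an interrupt handler */ |
static void example_wait_for_flag(void) |
{ |
	while (!READ_ONCE(example_flag)) |
		; /* READ_ONCE() forces a fresh load on every pass */ |
} |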
/drivers/include/linux/completion.h |
---|
0,0 → 1,109 |
#ifndef __LINUX_COMPLETION_H |
#define __LINUX_COMPLETION_H |
/* |
* (C) Copyright 2001 Linus Torvalds |
* |
* Atomic wait-for-completion handler data structures. |
* See kernel/sched/completion.c for details. |
*/ |
#include <linux/wait.h> |
/* |
* struct completion - structure used to maintain state for a "completion" |
* |
* This is the opaque structure used to maintain the state for a "completion". |
* Completions currently use a FIFO to queue threads that have to wait for |
* the "completion" event. |
* |
* See also: complete(), wait_for_completion() (and friends _timeout, |
* _interruptible, _interruptible_timeout, and _killable), init_completion(), |
* reinit_completion(), and macros DECLARE_COMPLETION(), |
* DECLARE_COMPLETION_ONSTACK(). |
*/ |
struct completion { |
unsigned int done; |
wait_queue_head_t wait; |
}; |
#define COMPLETION_INITIALIZER(work) \ |
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } |
#define COMPLETION_INITIALIZER_ONSTACK(work) \ |
({ init_completion(&work); work; }) |
/** |
* DECLARE_COMPLETION - declare and initialize a completion structure |
* @work: identifier for the completion structure |
* |
* This macro declares and initializes a completion structure. Generally used |
* for static declarations. You should use the _ONSTACK variant for automatic |
* variables. |
*/ |
#define DECLARE_COMPLETION(work) \ |
struct completion work = COMPLETION_INITIALIZER(work) |
/* |
* Lockdep needs to run a non-constant initializer for on-stack |
* completions - so we use the _ONSTACK() variant for those that |
* are on the kernel stack: |
*/ |
/** |
* DECLARE_COMPLETION_ONSTACK - declare and initialize a completion structure |
* @work: identifier for the completion structure |
* |
* This macro declares and initializes a completion structure on the kernel |
* stack. |
*/ |
#ifdef CONFIG_LOCKDEP |
# define DECLARE_COMPLETION_ONSTACK(work) \ |
struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) |
#else |
# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) |
#endif |
/** |
* init_completion - Initialize a dynamically allocated completion |
* @x: pointer to completion structure that is to be initialized |
* |
* This inline function will initialize a dynamically created completion |
* structure. |
*/ |
static inline void init_completion(struct completion *x) |
{ |
x->done = 0; |
init_waitqueue_head(&x->wait); |
} |
/** |
* reinit_completion - reinitialize a completion structure |
* @x: pointer to completion structure that is to be reinitialized |
* |
* This inline function should be used to reinitialize a completion structure so it can |
* be reused. This is especially important after complete_all() is used. |
*/ |
static inline void reinit_completion(struct completion *x) |
{ |
x->done = 0; |
} |
extern void wait_for_completion(struct completion *); |
extern void wait_for_completion_io(struct completion *); |
extern int wait_for_completion_interruptible(struct completion *x); |
extern int wait_for_completion_killable(struct completion *x); |
extern unsigned long wait_for_completion_timeout(struct completion *x, |
unsigned long timeout); |
extern unsigned long wait_for_completion_io_timeout(struct completion *x, |
unsigned long timeout); |
extern long wait_for_completion_interruptible_timeout( |
struct completion *x, unsigned long timeout); |
extern long wait_for_completion_killable_timeout( |
struct completion *x, unsigned long timeout); |
extern bool try_wait_for_completion(struct completion *x); |
extern bool completion_done(struct completion *x); |
extern void complete(struct completion *); |
extern void complete_all(struct completion *); |
#endif |
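The canonical wait/signal pairing for this API, sketched with invented names: one side initializes and blocks, the other signals once the event has occurred. |
#include <linux/completion.h> |
static struct completion example_done; |
/* Signalling side, e.g. an interrupt handler. */ |
static void example_on_hw_done(void) |
{ |
	complete(&example_done); |
} |
/* Waiting side: start the operation, then block until it is signalled. */ |
static void example_run_and_wait(void) |
{ |
	init_completion(&example_done); |
	/* ... kick off the hardware operation ... */ |
	wait_for_completion(&example_done); |
} |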
/drivers/include/linux/cpumask.h |
---|
0,0 → 1,999 |
#ifndef __LINUX_CPUMASK_H |
#define __LINUX_CPUMASK_H |
/* |
* Cpumasks provide a bitmap suitable for representing the |
* set of CPUs in a system, one bit position per CPU number. In general, |
* only nr_cpu_ids (<= NR_CPUS) bits are valid. |
*/ |
#include <linux/kernel.h> |
#include <linux/threads.h> |
#include <linux/bitmap.h> |
#include <linux/bug.h> |
typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; |
/** |
* cpumask_bits - get the bits in a cpumask |
* @maskp: the struct cpumask * |
* |
* You should only assume nr_cpu_ids bits of this mask are valid. This is |
* a macro so it's const-correct. |
*/ |
#define cpumask_bits(maskp) ((maskp)->bits) |
#if NR_CPUS == 1 |
#define nr_cpu_ids 1 |
#else |
extern int nr_cpu_ids; |
#endif |
#ifdef CONFIG_CPUMASK_OFFSTACK |
/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also, |
* not all bits may be allocated. */ |
#define nr_cpumask_bits nr_cpu_ids |
#else |
#define nr_cpumask_bits NR_CPUS |
#endif |
/* |
* The following particular system cpumasks and operations manage |
* possible, present, active and online cpus. |
* |
* cpu_possible_mask- has bit 'cpu' set iff cpu is populatable |
* cpu_present_mask - has bit 'cpu' set iff cpu is populated |
* cpu_online_mask - has bit 'cpu' set iff cpu available to scheduler |
* cpu_active_mask - has bit 'cpu' set iff cpu available to migration |
* |
* If !CONFIG_HOTPLUG_CPU, present == possible, and active == online. |
* |
* The cpu_possible_mask is fixed at boot time, as the set of CPU id's |
* that it is possible might ever be plugged in at anytime during the |
* life of that system boot. The cpu_present_mask is dynamic(*), |
* representing which CPUs are currently plugged in. And |
* cpu_online_mask is the dynamic subset of cpu_present_mask, |
* indicating those CPUs available for scheduling. |
* |
* If HOTPLUG is enabled, then cpu_possible_mask is forced to have |
* all NR_CPUS bits set, otherwise it is just the set of CPUs that |
* ACPI reports present at boot. |
* |
* If HOTPLUG is enabled, then cpu_present_mask varies dynamically, |
* depending on what ACPI reports as currently plugged in, otherwise |
* cpu_present_mask is just a copy of cpu_possible_mask. |
* |
* (*) Well, cpu_present_mask is dynamic in the hotplug case. If not |
* hotplug, it's a copy of cpu_possible_mask, hence fixed at boot. |
* |
* Subtleties: |
* 1) UP archs (NR_CPUS == 1, CONFIG_SMP not defined) hardcode the |
* assumption that their single CPU is online. The UP |
* cpu_{online,possible,present}_masks are placebos. Changing them |
* will have no useful effect on the following num_*_cpus() |
* and cpu_*() macros in the UP case. This ugliness is a UP |
* optimization - don't waste any instructions or memory references |
* asking if you're online or how many CPUs there are if there is |
* only one CPU. |
*/ |
extern const struct cpumask *const cpu_possible_mask; |
extern const struct cpumask *const cpu_online_mask; |
extern const struct cpumask *const cpu_present_mask; |
extern const struct cpumask *const cpu_active_mask; |
#if NR_CPUS > 1 |
#define num_online_cpus() cpumask_weight(cpu_online_mask) |
#define num_possible_cpus() cpumask_weight(cpu_possible_mask) |
#define num_present_cpus() cpumask_weight(cpu_present_mask) |
#define num_active_cpus() cpumask_weight(cpu_active_mask) |
#define cpu_online(cpu) cpumask_test_cpu((cpu), cpu_online_mask) |
#define cpu_possible(cpu) cpumask_test_cpu((cpu), cpu_possible_mask) |
#define cpu_present(cpu) cpumask_test_cpu((cpu), cpu_present_mask) |
#define cpu_active(cpu) cpumask_test_cpu((cpu), cpu_active_mask) |
#else |
#define num_online_cpus() 1U |
#define num_possible_cpus() 1U |
#define num_present_cpus() 1U |
#define num_active_cpus() 1U |
#define cpu_online(cpu) ((cpu) == 0) |
#define cpu_possible(cpu) ((cpu) == 0) |
#define cpu_present(cpu) ((cpu) == 0) |
#define cpu_active(cpu) ((cpu) == 0) |
#endif |
/* verify cpu argument to cpumask_* operators */ |
static inline unsigned int cpumask_check(unsigned int cpu) |
{ |
#ifdef CONFIG_DEBUG_PER_CPU_MAPS |
WARN_ON_ONCE(cpu >= nr_cpumask_bits); |
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */ |
return cpu; |
} |
#if NR_CPUS == 1 |
/* Uniprocessor. Assume all masks are "1". */ |
static inline unsigned int cpumask_first(const struct cpumask *srcp) |
{ |
return 0; |
} |
/* Valid inputs for n are -1 and 0. */ |
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) |
{ |
return n+1; |
} |
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) |
{ |
return n+1; |
} |
static inline unsigned int cpumask_next_and(int n, |
const struct cpumask *srcp, |
const struct cpumask *andp) |
{ |
return n+1; |
} |
/* cpu must be a valid cpu, ie 0, so there's no other choice. */ |
static inline unsigned int cpumask_any_but(const struct cpumask *mask, |
unsigned int cpu) |
{ |
return 1; |
} |
static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp) |
{ |
set_bit(0, cpumask_bits(dstp)); |
return 0; |
} |
#define for_each_cpu(cpu, mask) \ |
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) |
#define for_each_cpu_not(cpu, mask) \ |
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) |
#define for_each_cpu_and(cpu, mask, and) \ |
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and) |
#else |
/** |
* cpumask_first - get the first cpu in a cpumask |
* @srcp: the cpumask pointer |
* |
* Returns >= nr_cpu_ids if no cpus set. |
*/ |
static inline unsigned int cpumask_first(const struct cpumask *srcp) |
{ |
return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits); |
} |
/** |
* cpumask_next - get the next cpu in a cpumask |
* @n: the cpu prior to the place to search (ie. return will be > @n) |
* @srcp: the cpumask pointer |
* |
* Returns >= nr_cpu_ids if no further cpus set. |
*/ |
static inline unsigned int cpumask_next(int n, const struct cpumask *srcp) |
{ |
/* -1 is a legal arg here. */ |
if (n != -1) |
cpumask_check(n); |
return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); |
} |
/** |
* cpumask_next_zero - get the next unset cpu in a cpumask |
* @n: the cpu prior to the place to search (ie. return will be > @n) |
* @srcp: the cpumask pointer |
* |
* Returns >= nr_cpu_ids if no further cpus unset. |
*/ |
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) |
{ |
/* -1 is a legal arg here. */ |
if (n != -1) |
cpumask_check(n); |
return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1); |
} |
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); |
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); |
int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp); |
/** |
* for_each_cpu - iterate over every cpu in a mask |
* @cpu: the (optionally unsigned) integer iterator |
* @mask: the cpumask pointer |
* |
* After the loop, cpu is >= nr_cpu_ids. |
*/ |
#define for_each_cpu(cpu, mask) \ |
for ((cpu) = -1; \ |
(cpu) = cpumask_next((cpu), (mask)), \ |
(cpu) < nr_cpu_ids;) |
/** |
* for_each_cpu_not - iterate over every cpu in a complemented mask |
* @cpu: the (optionally unsigned) integer iterator |
* @mask: the cpumask pointer |
* |
* After the loop, cpu is >= nr_cpu_ids. |
*/ |
#define for_each_cpu_not(cpu, mask) \ |
for ((cpu) = -1; \ |
(cpu) = cpumask_next_zero((cpu), (mask)), \ |
(cpu) < nr_cpu_ids;) |
/** |
* for_each_cpu_and - iterate over every cpu in both masks |
* @cpu: the (optionally unsigned) integer iterator |
* @mask: the first cpumask pointer |
* @and: the second cpumask pointer |
* |
* This saves a temporary CPU mask in many places. It is equivalent to: |
* struct cpumask tmp; |
* cpumask_and(&tmp, &mask, &and); |
* for_each_cpu(cpu, &tmp) |
* ... |
* |
* After the loop, cpu is >= nr_cpu_ids. |
*/ |
#define for_each_cpu_and(cpu, mask, and) \ |
for ((cpu) = -1; \ |
(cpu) = cpumask_next_and((cpu), (mask), (and)), \ |
(cpu) < nr_cpu_ids;) |
#endif /* SMP */ |
#define CPU_BITS_NONE \ |
{ \ |
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ |
} |
#define CPU_BITS_CPU0 \ |
{ \ |
[0] = 1UL \ |
} |
/** |
* cpumask_set_cpu - set a cpu in a cpumask |
* @cpu: cpu number (< nr_cpu_ids) |
* @dstp: the cpumask pointer |
*/ |
static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) |
{ |
set_bit(cpumask_check(cpu), cpumask_bits(dstp)); |
} |
/** |
* cpumask_clear_cpu - clear a cpu in a cpumask |
* @cpu: cpu number (< nr_cpu_ids) |
* @dstp: the cpumask pointer |
*/ |
static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp) |
{ |
clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); |
} |
/** |
* cpumask_test_cpu - test for a cpu in a cpumask |
* @cpu: cpu number (< nr_cpu_ids) |
* @cpumask: the cpumask pointer |
* |
* Returns 1 if @cpu is set in @cpumask, else returns 0 |
* |
* No static inline type checking - see Subtlety (1) above. |
*/ |
#define cpumask_test_cpu(cpu, cpumask) \ |
test_bit(cpumask_check(cpu), cpumask_bits((cpumask))) |
/** |
* cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask |
* @cpu: cpu number (< nr_cpu_ids) |
* @cpumask: the cpumask pointer |
* |
* Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0 |
* |
* test_and_set_bit wrapper for cpumasks. |
*/ |
static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) |
{ |
return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask)); |
} |
/** |
* cpumask_test_and_clear_cpu - atomically test and clear a cpu in a cpumask |
* @cpu: cpu number (< nr_cpu_ids) |
* @cpumask: the cpumask pointer |
* |
* Returns 1 if @cpu is set in old bitmap of @cpumask, else returns 0 |
* |
* test_and_clear_bit wrapper for cpumasks. |
*/ |
static inline int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) |
{ |
return test_and_clear_bit(cpumask_check(cpu), cpumask_bits(cpumask)); |
} |
/** |
* cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask |
* @dstp: the cpumask pointer |
*/ |
static inline void cpumask_setall(struct cpumask *dstp) |
{ |
bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits); |
} |
/** |
* cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask |
* @dstp: the cpumask pointer |
*/ |
static inline void cpumask_clear(struct cpumask *dstp) |
{ |
bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits); |
} |
/** |
* cpumask_and - *dstp = *src1p & *src2p |
* @dstp: the cpumask result |
* @src1p: the first input |
* @src2p: the second input |
* |
* If *@dstp is empty, returns 0, else returns 1 |
*/ |
static inline int cpumask_and(struct cpumask *dstp, |
const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
return bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p), |
cpumask_bits(src2p), nr_cpumask_bits); |
} |
/** |
* cpumask_or - *dstp = *src1p | *src2p |
* @dstp: the cpumask result |
* @src1p: the first input |
* @src2p: the second input |
*/ |
static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p), |
cpumask_bits(src2p), nr_cpumask_bits); |
} |
/** |
* cpumask_xor - *dstp = *src1p ^ *src2p |
* @dstp: the cpumask result |
* @src1p: the first input |
* @src2p: the second input |
*/ |
static inline void cpumask_xor(struct cpumask *dstp, |
const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p), |
cpumask_bits(src2p), nr_cpumask_bits); |
} |
/** |
* cpumask_andnot - *dstp = *src1p & ~*src2p |
* @dstp: the cpumask result |
* @src1p: the first input |
* @src2p: the second input |
* |
* If *@dstp is empty, returns 0, else returns 1 |
*/ |
static inline int cpumask_andnot(struct cpumask *dstp, |
const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
return bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p), |
cpumask_bits(src2p), nr_cpumask_bits); |
} |
/** |
* cpumask_complement - *dstp = ~*srcp |
* @dstp: the cpumask result |
* @srcp: the input to invert |
*/ |
static inline void cpumask_complement(struct cpumask *dstp, |
const struct cpumask *srcp) |
{ |
bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp), |
nr_cpumask_bits); |
} |
/** |
* cpumask_equal - *src1p == *src2p |
* @src1p: the first input |
* @src2p: the second input |
*/ |
static inline bool cpumask_equal(const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p), |
nr_cpumask_bits); |
} |
/** |
* cpumask_intersects - (*src1p & *src2p) != 0 |
* @src1p: the first input |
* @src2p: the second input |
*/ |
static inline bool cpumask_intersects(const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p), |
nr_cpumask_bits); |
} |
/** |
* cpumask_subset - (*src1p & ~*src2p) == 0 |
* @src1p: the first input |
* @src2p: the second input |
* |
* Returns 1 if *@src1p is a subset of *@src2p, else returns 0 |
*/ |
static inline int cpumask_subset(const struct cpumask *src1p, |
const struct cpumask *src2p) |
{ |
return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p), |
nr_cpumask_bits); |
} |
/** |
* cpumask_empty - *srcp == 0 |
* @srcp: the cpumask to check; true when all cpus < nr_cpu_ids are clear. |
*/ |
static inline bool cpumask_empty(const struct cpumask *srcp) |
{ |
return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits); |
} |
/** |
* cpumask_full - *srcp == 0xFFFFFFFF... |
* @srcp: the cpumask to check; true when all cpus < nr_cpu_ids are set. |
*/ |
static inline bool cpumask_full(const struct cpumask *srcp) |
{ |
return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits); |
} |
/** |
* cpumask_weight - Count of bits in *srcp |
* @srcp: the cpumask to count bits (< nr_cpu_ids) in. |
*/ |
static inline unsigned int cpumask_weight(const struct cpumask *srcp) |
{ |
return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits); |
} |
/** |
* cpumask_shift_right - *dstp = *srcp >> n |
* @dstp: the cpumask result |
* @srcp: the input to shift |
* @n: the number of bits to shift by |
*/ |
static inline void cpumask_shift_right(struct cpumask *dstp, |
const struct cpumask *srcp, int n) |
{ |
bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n, |
nr_cpumask_bits); |
} |
/** |
* cpumask_shift_left - *dstp = *srcp << n |
* @dstp: the cpumask result |
* @srcp: the input to shift |
* @n: the number of bits to shift by |
*/ |
static inline void cpumask_shift_left(struct cpumask *dstp, |
const struct cpumask *srcp, int n) |
{ |
bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n, |
nr_cpumask_bits); |
} |
/** |
* cpumask_copy - *dstp = *srcp |
* @dstp: the result |
* @srcp: the input cpumask |
*/ |
static inline void cpumask_copy(struct cpumask *dstp, |
const struct cpumask *srcp) |
{ |
bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits); |
} |
/** |
* cpumask_any - pick a "random" cpu from *srcp |
* @srcp: the input cpumask |
* |
* Returns >= nr_cpu_ids if no cpus set. |
*/ |
#define cpumask_any(srcp) cpumask_first(srcp) |
/** |
* cpumask_first_and - return the first cpu from *srcp1 & *srcp2 |
* @src1p: the first input |
* @src2p: the second input |
* |
* Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and(). |
*/ |
#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p)) |
/** |
* cpumask_any_and - pick a "random" cpu from *mask1 & *mask2 |
* @mask1: the first input cpumask |
* @mask2: the second input cpumask |
* |
* Returns >= nr_cpu_ids if no cpus set. |
*/ |
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2)) |
/** |
* cpumask_of - the cpumask containing just a given cpu |
* @cpu: the cpu (< nr_cpu_ids) |
*/ |
#define cpumask_of(cpu) (get_cpu_mask(cpu)) |
/** |
* cpumask_scnprintf - print a cpumask into a string as comma-separated hex |
* @buf: the buffer to sprintf into |
* @len: the length of the buffer |
* @srcp: the cpumask to print |
* |
* If len is zero, returns zero. Otherwise returns the length of the |
* (nul-terminated) @buf string. |
*/ |
static inline int cpumask_scnprintf(char *buf, int len, |
const struct cpumask *srcp) |
{ |
return bitmap_scnprintf(buf, len, cpumask_bits(srcp), nr_cpumask_bits); |
} |
/** |
* cpumask_parse_user - extract a cpumask from a user string |
* @buf: the buffer to extract from |
* @len: the length of the buffer |
* @dstp: the cpumask to set. |
* |
* Returns -errno, or 0 for success. |
*/ |
static inline int cpumask_parse_user(const char __user *buf, int len, |
struct cpumask *dstp) |
{ |
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); |
} |
/** |
* cpumask_parselist_user - extract a cpumask from a user string |
* @buf: the buffer to extract from |
* @len: the length of the buffer |
* @dstp: the cpumask to set. |
* |
* Returns -errno, or 0 for success. |
*/ |
static inline int cpumask_parselist_user(const char __user *buf, int len, |
struct cpumask *dstp) |
{ |
return bitmap_parselist_user(buf, len, cpumask_bits(dstp), |
nr_cpumask_bits); |
} |
/** |
* cpulist_scnprintf - print a cpumask into a string as comma-separated list |
* @buf: the buffer to sprintf into |
* @len: the length of the buffer |
* @srcp: the cpumask to print |
* |
* If len is zero, returns zero. Otherwise returns the length of the |
* (nul-terminated) @buf string. |
*/ |
static inline int cpulist_scnprintf(char *buf, int len, |
const struct cpumask *srcp) |
{ |
return bitmap_scnlistprintf(buf, len, cpumask_bits(srcp), |
nr_cpumask_bits); |
} |
/** |
* cpumask_parse - extract a cpumask from a string |
* @buf: the buffer to extract from |
* @dstp: the cpumask to set. |
* |
* Returns -errno, or 0 for success. |
*/ |
static inline int cpumask_parse(const char *buf, struct cpumask *dstp) |
{ |
char *nl = strchr(buf, '\n'); |
unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); |
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); |
} |
/** |
* cpulist_parse - extract a cpumask from a user string of ranges |
* @buf: the buffer to extract from |
* @dstp: the cpumask to set. |
* |
* Returns -errno, or 0 for success. |
*/ |
static inline int cpulist_parse(const char *buf, struct cpumask *dstp) |
{ |
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); |
} |
/** |
* cpumask_size - size to allocate for a 'struct cpumask' in bytes |
* |
* This will eventually be a runtime variable, depending on nr_cpu_ids. |
*/ |
static inline size_t cpumask_size(void) |
{ |
/* FIXME: Once all cpumask assignments are eliminated, this |
* can be nr_cpumask_bits */ |
return BITS_TO_LONGS(NR_CPUS) * sizeof(long); |
} |
/* |
* cpumask_var_t: struct cpumask for stack usage. |
* |
* Oh, the wicked games we play! In order to make kernel coding a |
* little more difficult, we typedef cpumask_var_t to an array or a |
* pointer: doing &mask on an array is a noop, so it still works. |
* |
* ie. |
* cpumask_var_t tmpmask; |
* if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) |
* return -ENOMEM; |
* |
* ... use 'tmpmask' like a normal struct cpumask * ... |
* |
* free_cpumask_var(tmpmask); |
* |
* |
* However, there is one notable exception: alloc_cpumask_var() allocates |
* only nr_cpumask_bits bits (whereas a real cpumask_t always has |
* NR_CPUS bits). Therefore you must not dereference cpumask_var_t. |
* |
* cpumask_var_t tmpmask; |
* if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) |
* return -ENOMEM; |
* |
* var = *tmpmask; |
* |
* This code does an NR_CPUS-sized memcpy and can corrupt memory; |
* cpumask_copy() provides safe copy functionality. |
* |
* Note that there is another evil here: if you define a cpumask_var_t |
* as a percpu variable, the way the address of the cpumask structure is |
* obtained differs between configurations, which changes which this_cpu_* |
* operation must be used. Please use this_cpu_cpumask_var_ptr() in those |
* cases. Direct use of this_cpu_ptr() or this_cpu_read() will lead to |
* failures when the other type of cpumask_var_t implementation is configured. |
*/ |
#ifdef CONFIG_CPUMASK_OFFSTACK |
typedef struct cpumask *cpumask_var_t; |
#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x) |
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); |
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); |
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); |
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); |
void alloc_bootmem_cpumask_var(cpumask_var_t *mask); |
void free_cpumask_var(cpumask_var_t mask); |
void free_bootmem_cpumask_var(cpumask_var_t mask); |
#else |
typedef struct cpumask cpumask_var_t[1]; |
#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x) |
static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) |
{ |
return true; |
} |
static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, |
int node) |
{ |
return true; |
} |
static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) |
{ |
cpumask_clear(*mask); |
return true; |
} |
static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, |
int node) |
{ |
cpumask_clear(*mask); |
return true; |
} |
static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) |
{ |
} |
static inline void free_cpumask_var(cpumask_var_t mask) |
{ |
} |
static inline void free_bootmem_cpumask_var(cpumask_var_t mask) |
{ |
} |
#endif /* CONFIG_CPUMASK_OFFSTACK */ |
/* It's common to want to use cpu_all_mask in struct member initializers, |
* so it has to refer to an address rather than a pointer. */ |
extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS); |
#define cpu_all_mask to_cpumask(cpu_all_bits) |
/* First bits of cpu_bit_bitmap are in fact unset. */ |
#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0]) |
#define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask) |
#define for_each_online_cpu(cpu) for_each_cpu((cpu), cpu_online_mask) |
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) |
/* Wrappers for arch boot code to manipulate normally-constant masks */ |
void set_cpu_possible(unsigned int cpu, bool possible); |
void set_cpu_present(unsigned int cpu, bool present); |
void set_cpu_online(unsigned int cpu, bool online); |
void set_cpu_active(unsigned int cpu, bool active); |
void init_cpu_present(const struct cpumask *src); |
void init_cpu_possible(const struct cpumask *src); |
void init_cpu_online(const struct cpumask *src); |
/** |
* to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * |
* @bitmap: the bitmap |
* |
* There are a few places where cpumask_var_t isn't appropriate and |
* static cpumasks must be used (eg. very early boot), yet we don't |
* expose the definition of 'struct cpumask'. |
* |
* This does the conversion, and can be used as a constant initializer. |
*/ |
#define to_cpumask(bitmap) \ |
((struct cpumask *)(1 ? (bitmap) \ |
: (void *)sizeof(__check_is_bitmap(bitmap)))) |
static inline int __check_is_bitmap(const unsigned long *bitmap) |
{ |
return 1; |
} |
/* |
* Special-case data structure for "single bit set only" constant CPU masks. |
* |
* We pre-generate all the 64 (or 32) possible bit positions, with enough |
* padding to the left and the right, and return the constant pointer |
* appropriately offset. |
*/ |
extern const unsigned long |
cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)]; |
static inline const struct cpumask *get_cpu_mask(unsigned int cpu) |
{ |
const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]; |
p -= cpu / BITS_PER_LONG; |
return to_cpumask(p); |
} |
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu)) |
#if NR_CPUS <= BITS_PER_LONG |
#define CPU_BITS_ALL \ |
{ \ |
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ |
} |
#else /* NR_CPUS > BITS_PER_LONG */ |
#define CPU_BITS_ALL \ |
{ \ |
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ |
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ |
} |
#endif /* NR_CPUS > BITS_PER_LONG */ |
/** |
* cpumap_print_to_pagebuf - copies the cpumask into the buffer either |
* as a comma-separated list of cpus or as hex values of the cpumask |
* @list: indicates whether the cpumap must be list |
* @mask: the cpumask to copy |
* @buf: the buffer to copy into |
* |
* Returns the length of the (null-terminated) @buf string, zero if |
* nothing is copied. |
*/ |
static inline ssize_t |
cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) |
{ |
return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask), |
nr_cpumask_bits); |
} |
/* |
* |
* From here down, all obsolete. Use cpumask_ variants! |
* |
*/ |
#ifndef CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS |
#define cpumask_of_cpu(cpu) (*get_cpu_mask(cpu)) |
#define CPU_MASK_LAST_WORD BITMAP_LAST_WORD_MASK(NR_CPUS) |
#if NR_CPUS <= BITS_PER_LONG |
#define CPU_MASK_ALL \ |
(cpumask_t) { { \ |
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ |
} } |
#else |
#define CPU_MASK_ALL \ |
(cpumask_t) { { \ |
[0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \ |
[BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \ |
} } |
#endif |
#define CPU_MASK_NONE \ |
(cpumask_t) { { \ |
[0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \ |
} } |
#define CPU_MASK_CPU0 \ |
(cpumask_t) { { \ |
[0] = 1UL \ |
} } |
#if NR_CPUS == 1 |
#define first_cpu(src) ({ (void)(src); 0; }) |
#define next_cpu(n, src) ({ (void)(src); 1; }) |
#define any_online_cpu(mask) 0 |
#define for_each_cpu_mask(cpu, mask) \ |
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask) |
#else /* NR_CPUS > 1 */ |
int __first_cpu(const cpumask_t *srcp); |
int __next_cpu(int n, const cpumask_t *srcp); |
#define first_cpu(src) __first_cpu(&(src)) |
#define next_cpu(n, src) __next_cpu((n), &(src)) |
#define any_online_cpu(mask) cpumask_any_and(&mask, cpu_online_mask) |
#define for_each_cpu_mask(cpu, mask) \ |
for ((cpu) = -1; \ |
(cpu) = next_cpu((cpu), (mask)), \ |
(cpu) < NR_CPUS; ) |
#endif /* SMP */ |
#if NR_CPUS <= 64 |
#define for_each_cpu_mask_nr(cpu, mask) for_each_cpu_mask(cpu, mask) |
#else /* NR_CPUS > 64 */ |
int __next_cpu_nr(int n, const cpumask_t *srcp); |
#define for_each_cpu_mask_nr(cpu, mask) \ |
for ((cpu) = -1; \ |
(cpu) = __next_cpu_nr((cpu), &(mask)), \ |
(cpu) < nr_cpu_ids; ) |
#endif /* NR_CPUS > 64 */ |
#define cpus_addr(src) ((src).bits) |
#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) |
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp) |
{ |
set_bit(cpu, dstp->bits); |
} |
#define cpu_clear(cpu, dst) __cpu_clear((cpu), &(dst)) |
static inline void __cpu_clear(int cpu, volatile cpumask_t *dstp) |
{ |
clear_bit(cpu, dstp->bits); |
} |
#define cpus_setall(dst) __cpus_setall(&(dst), NR_CPUS) |
static inline void __cpus_setall(cpumask_t *dstp, int nbits) |
{ |
bitmap_fill(dstp->bits, nbits); |
} |
#define cpus_clear(dst) __cpus_clear(&(dst), NR_CPUS) |
static inline void __cpus_clear(cpumask_t *dstp, int nbits) |
{ |
bitmap_zero(dstp->bits, nbits); |
} |
/* No static inline type checking - see Subtlety (1) above. */ |
#define cpu_isset(cpu, cpumask) test_bit((cpu), (cpumask).bits) |
#define cpu_test_and_set(cpu, cpumask) __cpu_test_and_set((cpu), &(cpumask)) |
static inline int __cpu_test_and_set(int cpu, cpumask_t *addr) |
{ |
return test_and_set_bit(cpu, addr->bits); |
} |
#define cpus_and(dst, src1, src2) __cpus_and(&(dst), &(src1), &(src2), NR_CPUS) |
static inline int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); |
} |
#define cpus_or(dst, src1, src2) __cpus_or(&(dst), &(src1), &(src2), NR_CPUS) |
static inline void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); |
} |
#define cpus_xor(dst, src1, src2) __cpus_xor(&(dst), &(src1), &(src2), NR_CPUS) |
static inline void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); |
} |
#define cpus_andnot(dst, src1, src2) \ |
__cpus_andnot(&(dst), &(src1), &(src2), NR_CPUS) |
static inline int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); |
} |
#define cpus_equal(src1, src2) __cpus_equal(&(src1), &(src2), NR_CPUS) |
static inline int __cpus_equal(const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
return bitmap_equal(src1p->bits, src2p->bits, nbits); |
} |
#define cpus_intersects(src1, src2) __cpus_intersects(&(src1), &(src2), NR_CPUS) |
static inline int __cpus_intersects(const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
return bitmap_intersects(src1p->bits, src2p->bits, nbits); |
} |
#define cpus_subset(src1, src2) __cpus_subset(&(src1), &(src2), NR_CPUS) |
static inline int __cpus_subset(const cpumask_t *src1p, |
const cpumask_t *src2p, int nbits) |
{ |
return bitmap_subset(src1p->bits, src2p->bits, nbits); |
} |
#define cpus_empty(src) __cpus_empty(&(src), NR_CPUS) |
static inline int __cpus_empty(const cpumask_t *srcp, int nbits) |
{ |
return bitmap_empty(srcp->bits, nbits); |
} |
#define cpus_weight(cpumask) __cpus_weight(&(cpumask), NR_CPUS) |
static inline int __cpus_weight(const cpumask_t *srcp, int nbits) |
{ |
return bitmap_weight(srcp->bits, nbits); |
} |
#define cpus_shift_left(dst, src, n) \ |
__cpus_shift_left(&(dst), &(src), (n), NR_CPUS) |
static inline void __cpus_shift_left(cpumask_t *dstp, |
const cpumask_t *srcp, int n, int nbits) |
{ |
bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); |
} |
#endif /* !CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS */ |
#endif /* __LINUX_CPUMASK_H */ |
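A small sketch using the recommended (non-obsolete) operators above; the helper is hypothetical. |
#include <linux/cpumask.h> |
/* Count how many CPUs in 'affinity' are currently online. */ |
static unsigned int example_count_online(const struct cpumask *affinity) |
{ |
	unsigned int cpu, n = 0; |
	for_each_cpu_and(cpu, affinity, cpu_online_mask) |
		n++; |
	return n; |
} |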
/drivers/include/linux/delay.h |
---|
7,6 → 7,49 |
* Delay routines, using a pre-computed "loops_per_jiffy" value. |
*/ |
#define usleep_range(min, max) udelay(max) |
#include <linux/kernel.h> |
extern unsigned long loops_per_jiffy; |
#include <asm/delay.h> |
/* |
* Using udelay() for intervals greater than a few milliseconds can |
* risk overflow for high loops_per_jiffy (high bogomips) machines. |
* mdelay() provides a wrapper to prevent this. For delays greater |
* than MAX_UDELAY_MS milliseconds, the wrapper is used. Architecture |
* specific values can be defined in asm-???/delay.h as an override. |
* The 2nd mdelay() definition ensures GCC will optimize away the |
* while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G. |
*/ |
#ifndef MAX_UDELAY_MS |
#define MAX_UDELAY_MS 5 |
#endif |
#ifndef mdelay |
#define mdelay(n) (\ |
(__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ? udelay((n)*1000) : \ |
({unsigned long __ms=(n); while (__ms--) udelay(1000);})) |
#endif |
#ifndef ndelay |
static inline void ndelay(unsigned long x) |
{ |
udelay(DIV_ROUND_UP(x, 1000)); |
} |
#define ndelay(x) ndelay(x) |
#endif |
extern unsigned long lpj_fine; |
void calibrate_delay(void); |
void msleep(unsigned int msecs); |
unsigned long msleep_interruptible(unsigned int msecs); |
void usleep_range(unsigned long min, unsigned long max); |
static inline void ssleep(unsigned int seconds) |
{ |
msleep(seconds * 1000); |
} |
#endif /* defined(_LINUX_DELAY_H) */ |
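Note that this port maps usleep_range(min, max) straight onto udelay(max), so every "sleep" below is really a busy-wait. A sketch of how the constant-folding in mdelay() and the rounding in ndelay() play out (values are illustrative):

#include <linux/delay.h>

static void settle_hw(void)
{
	mdelay(3);		/* constant <= MAX_UDELAY_MS: folds to udelay(3000) */
	mdelay(20);		/* constant > MAX_UDELAY_MS: compiles to the udelay(1000) loop */
	usleep_range(100, 200);	/* in this port: busy-waits via udelay(200) */
	ndelay(1500);		/* DIV_ROUND_UP(1500, 1000) -> udelay(2) */
}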
/drivers/include/linux/dma-buf.h |
---|
30,6 → 30,8 |
#include <linux/list.h> |
#include <linux/dma-mapping.h> |
#include <linux/fs.h> |
#include <linux/fence.h> |
#include <linux/wait.h> |
struct device; |
struct dma_buf; |
/drivers/include/linux/err.h |
---|
4,7 → 4,7 |
#include <linux/compiler.h> |
#include <linux/types.h> |
#include <errno.h> |
#include <asm/errno.h> |
/* |
* Kernel pointers have redundant information, so we can use a |
/drivers/include/linux/errno.h |
---|
1,116 → 1,32 |
#ifndef _ASM_GENERIC_ERRNO_H |
#define _ASM_GENERIC_ERRNO_H |
#ifndef _LINUX_ERRNO_H |
#define _LINUX_ERRNO_H |
#include <errno-base.h> |
#include <uapi/linux/errno.h> |
/* |
* These should never be seen by user programs. To return one of ERESTART* |
* codes, signal_pending() MUST be set. Note that ptrace can observe these |
* at syscall exit tracing, but they will never be left for the debugged user |
* process to see. |
*/ |
#define ERESTARTSYS 512 |
#define ERESTARTNOINTR 513 |
#define ERESTARTNOHAND 514 /* restart if no handler.. */ |
#define ENOIOCTLCMD 515 /* No ioctl command */ |
#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */ |
#define EPROBE_DEFER 517 /* Driver requests probe retry */ |
#define EOPENSTALE 518 /* open found a stale dentry */ |
#define EDEADLK 35 /* Resource deadlock would occur */ |
#define ENAMETOOLONG 36 /* File name too long */ |
#define ENOLCK 37 /* No record locks available */ |
#define ENOSYS 38 /* Function not implemented */ |
#define ENOTEMPTY 39 /* Directory not empty */ |
#define ELOOP 40 /* Too many symbolic links encountered */ |
#define EWOULDBLOCK EAGAIN /* Operation would block */ |
#define ENOMSG 42 /* No message of desired type */ |
#define EIDRM 43 /* Identifier removed */ |
#define ECHRNG 44 /* Channel number out of range */ |
#define EL2NSYNC 45 /* Level 2 not synchronized */ |
#define EL3HLT 46 /* Level 3 halted */ |
#define EL3RST 47 /* Level 3 reset */ |
#define ELNRNG 48 /* Link number out of range */ |
#define EUNATCH 49 /* Protocol driver not attached */ |
#define ENOCSI 50 /* No CSI structure available */ |
#define EL2HLT 51 /* Level 2 halted */ |
#define EBADE 52 /* Invalid exchange */ |
#define EBADR 53 /* Invalid request descriptor */ |
#define EXFULL 54 /* Exchange full */ |
#define ENOANO 55 /* No anode */ |
#define EBADRQC 56 /* Invalid request code */ |
#define EBADSLT 57 /* Invalid slot */ |
#define EDEADLOCK EDEADLK |
#define EBFONT 59 /* Bad font file format */ |
#define ENOSTR 60 /* Device not a stream */ |
#define ENODATA 61 /* No data available */ |
#define ETIME 62 /* Timer expired */ |
#define ENOSR 63 /* Out of streams resources */ |
#define ENONET 64 /* Machine is not on the network */ |
#define ENOPKG 65 /* Package not installed */ |
#define EREMOTE 66 /* Object is remote */ |
#define ENOLINK 67 /* Link has been severed */ |
#define EADV 68 /* Advertise error */ |
#define ESRMNT 69 /* Srmount error */ |
#define ECOMM 70 /* Communication error on send */ |
#define EPROTO 71 /* Protocol error */ |
#define EMULTIHOP 72 /* Multihop attempted */ |
#define EDOTDOT 73 /* RFS specific error */ |
#define EBADMSG 74 /* Not a data message */ |
#define EOVERFLOW 75 /* Value too large for defined data type */ |
#define ENOTUNIQ 76 /* Name not unique on network */ |
#define EBADFD 77 /* File descriptor in bad state */ |
#define EREMCHG 78 /* Remote address changed */ |
#define ELIBACC 79 /* Can not access a needed shared library */ |
#define ELIBBAD 80 /* Accessing a corrupted shared library */ |
#define ELIBSCN 81 /* .lib section in a.out corrupted */ |
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ |
#define ELIBEXEC 83 /* Cannot exec a shared library directly */ |
#define EILSEQ 84 /* Illegal byte sequence */ |
#define ERESTART 85 /* Interrupted system call should be restarted */ |
#define ESTRPIPE 86 /* Streams pipe error */ |
#define EUSERS 87 /* Too many users */ |
#define ENOTSOCK 88 /* Socket operation on non-socket */ |
#define EDESTADDRREQ 89 /* Destination address required */ |
#define EMSGSIZE 90 /* Message too long */ |
#define EPROTOTYPE 91 /* Protocol wrong type for socket */ |
#define ENOPROTOOPT 92 /* Protocol not available */ |
#define EPROTONOSUPPORT 93 /* Protocol not supported */ |
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ |
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ |
#define EPFNOSUPPORT 96 /* Protocol family not supported */ |
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ |
#define EADDRINUSE 98 /* Address already in use */ |
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ |
#define ENETDOWN 100 /* Network is down */ |
#define ENETUNREACH 101 /* Network is unreachable */ |
#define ENETRESET 102 /* Network dropped connection because of reset */ |
#define ECONNABORTED 103 /* Software caused connection abort */ |
#define ECONNRESET 104 /* Connection reset by peer */ |
#define ENOBUFS 105 /* No buffer space available */ |
#define EISCONN 106 /* Transport endpoint is already connected */ |
#define ENOTCONN 107 /* Transport endpoint is not connected */ |
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ |
#define ETOOMANYREFS 109 /* Too many references: cannot splice */ |
#define ETIMEDOUT 110 /* Connection timed out */ |
#define ECONNREFUSED 111 /* Connection refused */ |
#define EHOSTDOWN 112 /* Host is down */ |
#define EHOSTUNREACH 113 /* No route to host */ |
#define EALREADY 114 /* Operation already in progress */ |
#define EINPROGRESS 115 /* Operation now in progress */ |
#define ESTALE 116 /* Stale NFS file handle */ |
#define EUCLEAN 117 /* Structure needs cleaning */ |
#define ENOTNAM 118 /* Not a XENIX named type file */ |
#define ENAVAIL 119 /* No XENIX semaphores available */ |
#define EISNAM 120 /* Is a named type file */ |
#define EREMOTEIO 121 /* Remote I/O error */ |
#define EDQUOT 122 /* Quota exceeded */ |
#define ENOMEDIUM 123 /* No medium found */ |
#define EMEDIUMTYPE 124 /* Wrong medium type */ |
#define ECANCELED 125 /* Operation Canceled */ |
#define ENOKEY 126 /* Required key not available */ |
#define EKEYEXPIRED 127 /* Key has expired */ |
#define EKEYREVOKED 128 /* Key has been revoked */ |
#define EKEYREJECTED 129 /* Key was rejected by service */ |
/* for robust mutexes */ |
#define EOWNERDEAD 130 /* Owner died */ |
#define ENOTRECOVERABLE 131 /* State not recoverable */ |
#define ERFKILL 132 /* Operation not possible due to RF-kill */ |
/* Defined for the NFSv3 protocol */ |
#define EBADHANDLE 521 /* Illegal NFS file handle */ |
#define ENOTSYNC 522 /* Update synchronization mismatch */ |
#define EBADCOOKIE 523 /* Cookie is stale */ |
#define ENOTSUPP 524 /* Operation is not supported */ |
#define ETOOSMALL 525 /* Buffer or request is too small */ |
#define ESERVERFAULT 526 /* An untranslatable error occurred */ |
#define EBADTYPE 527 /* Type not supported by server */ |
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ |
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */ |
#endif |
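The ERESTART* codes are kernel-internal: they may only be returned while signal_pending() is true, and the syscall layer either restarts the call or converts them before userspace sees anything. A sketch of the usual pattern in an interruptible wait (a generic kernel idiom; current, signal_pending() and schedule() are assumed from the scheduler headers, which are not part of this hunk):

#include <linux/errno.h>
#include <linux/sched.h>

static int wait_for_flag(volatile int *flag)
{
	while (!*flag) {
		if (signal_pending(current))
			return -ERESTARTSYS;	/* restarted or mapped to -EINTR above us */
		schedule();
	}
	return 0;
}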
/drivers/include/linux/fence.h |
---|
0,0 → 1,357 |
/* |
* Fence mechanism for dma-buf to allow for asynchronous dma access |
* |
* Copyright (C) 2012 Canonical Ltd |
* Copyright (C) 2012 Texas Instruments |
* |
* Authors: |
* Rob Clark <robdclark@gmail.com> |
* Maarten Lankhorst <maarten.lankhorst@canonical.com> |
* |
* This program is free software; you can redistribute it and/or modify it |
* under the terms of the GNU General Public License version 2 as published by |
* the Free Software Foundation. |
* |
* This program is distributed in the hope that it will be useful, but WITHOUT |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
* more details. |
*/ |
#ifndef __LINUX_FENCE_H |
#define __LINUX_FENCE_H |
#include <linux/err.h> |
#include <linux/wait.h> |
#include <linux/list.h> |
#include <linux/bitops.h> |
#include <linux/kref.h> |
#include <linux/sched.h> |
#include <linux/printk.h> |
#include <linux/rcupdate.h> |
struct fence; |
struct fence_ops; |
struct fence_cb; |
/** |
* struct fence - software synchronization primitive |
* @refcount: refcount for this fence |
* @ops: fence_ops associated with this fence |
* @rcu: used for releasing fence with kfree_rcu |
* @cb_list: list of all callbacks to call |
* @lock: spin_lock_irqsave used for locking |
* @context: execution context this fence belongs to, returned by |
* fence_context_alloc() |
* @seqno: the sequence number of this fence inside the execution context, |
* can be compared to decide which fence would be signaled later. |
* @flags: A mask of FENCE_FLAG_* defined below |
* @timestamp: Timestamp when the fence was signaled. |
* @status: Optional, only valid if < 0, must be set before calling |
* fence_signal, indicates that the fence has completed with an error. |
* |
* the flags member must be manipulated and read using the appropriate |
* atomic ops (bit_*), so taking the spinlock will not be needed most |
* of the time. |
* |
* FENCE_FLAG_SIGNALED_BIT - fence is already signaled |
* FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called* |
* FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the |
* implementer of the fence for its own purposes. Can be used in different |
* ways by different fence implementers, so do not rely on this. |
* |
* *) Since atomic bitops are used, this is not guaranteed to be the case. |
* Particularly, if the bit was set, but fence_signal was called right |
* before this bit was set, it would have been able to set the |
* FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. |
* Adding a check for FENCE_FLAG_SIGNALED_BIT after setting |
* FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that |
* after fence_signal was called, any enable_signaling call will have either |
* been completed, or never called at all. |
*/ |
struct fence { |
struct kref refcount; |
const struct fence_ops *ops; |
struct rcu_head rcu; |
struct list_head cb_list; |
spinlock_t *lock; |
unsigned context, seqno; |
unsigned long flags; |
// ktime_t timestamp; |
int status; |
}; |
enum fence_flag_bits { |
FENCE_FLAG_SIGNALED_BIT, |
FENCE_FLAG_ENABLE_SIGNAL_BIT, |
FENCE_FLAG_USER_BITS, /* must always be last member */ |
}; |
typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb); |
/** |
* struct fence_cb - callback for fence_add_callback |
* @node: used by fence_add_callback to append this struct to fence::cb_list |
* @func: fence_func_t to call |
* |
* This struct will be initialized by fence_add_callback, additional |
* data can be passed along by embedding fence_cb in another struct. |
*/ |
struct fence_cb { |
struct list_head node; |
fence_func_t func; |
}; |
/** |
* struct fence_ops - operations implemented for fence |
* @get_driver_name: returns the driver name. |
* @get_timeline_name: return the name of the context this fence belongs to. |
* @enable_signaling: enable software signaling of fence. |
* @signaled: [optional] peek whether the fence is signaled, can be null. |
* @wait: custom wait implementation, or fence_default_wait. |
* @release: [optional] called on destruction of fence, can be null |
* @fill_driver_data: [optional] callback to fill in free-form debug info |
* Returns amount of bytes filled, or -errno. |
* @fence_value_str: [optional] fills in the value of the fence as a string |
* @timeline_value_str: [optional] fills in the current value of the timeline |
* as a string |
* |
* Notes on enable_signaling: |
* For fence implementations that have the capability for hw->hw |
* signaling, they can implement this op to enable the necessary |
* irqs, or insert commands into cmdstream, etc. This is called |
* in the first wait() or add_callback() path to let the fence |
* implementation know that there is another driver waiting on |
* the signal (ie. hw->sw case). |
* |
* This function can be called from atomic context, but not |
* from irq context, so normal spinlocks can be used. |
* |
* A return value of false indicates the fence already passed, |
* or some failure occurred that made it impossible to enable |
* signaling. True indicates successful enabling. |
* |
* fence->status may be set in enable_signaling, but only when false is |
* returned. |
* |
* Calling fence_signal before enable_signaling is called allows |
* for a tiny race window in which enable_signaling is called during, |
* before, or after fence_signal. To fight this, it is recommended |
* that before enable_signaling returns true an extra reference is |
* taken on the fence, to be released when the fence is signaled. |
* This will mean fence_signal will still be called twice, but |
* the second time will be a noop since it was already signaled. |
* |
* Notes on signaled: |
* May set fence->status if returning true. |
* |
* Notes on wait: |
* Must not be NULL, set to fence_default_wait for default implementation. |
* The fence_default_wait implementation should work for any fence, as long |
* as enable_signaling works correctly. |
* |
* Must return -ERESTARTSYS if the wait is intr = true and the wait was |
* interrupted, and remaining jiffies if fence has signaled, or 0 if wait |
* timed out. Can also return other error values on custom implementations, |
* which should be treated as if the fence is signaled. For example a hardware |
* lockup could be reported like that. |
* |
* Notes on release: |
* Can be NULL, this function allows additional commands to run on |
* destruction of the fence. Can be called from irq context. |
* If pointer is set to NULL, kfree will get called instead. |
*/ |
struct fence_ops { |
const char * (*get_driver_name)(struct fence *fence); |
const char * (*get_timeline_name)(struct fence *fence); |
bool (*enable_signaling)(struct fence *fence); |
bool (*signaled)(struct fence *fence); |
signed long (*wait)(struct fence *fence, bool intr, signed long timeout); |
void (*release)(struct fence *fence); |
int (*fill_driver_data)(struct fence *fence, void *data, int size); |
void (*fence_value_str)(struct fence *fence, char *str, int size); |
void (*timeline_value_str)(struct fence *fence, char *str, int size); |
}; |
void fence_init(struct fence *fence, const struct fence_ops *ops, |
spinlock_t *lock, unsigned context, unsigned seqno); |
void fence_release(struct kref *kref); |
void fence_free(struct fence *fence); |
/** |
* fence_get - increases refcount of the fence |
* @fence: [in] fence to increase refcount of |
* |
* Returns the same fence, with refcount increased by 1. |
*/ |
static inline struct fence *fence_get(struct fence *fence) |
{ |
if (fence) |
kref_get(&fence->refcount); |
return fence; |
} |
/** |
* fence_get_rcu - get a fence from a reservation_object_list with rcu read lock |
* @fence: [in] fence to increase refcount of |
* |
* Returns the fence on success, or NULL if a reference could not be obtained. |
*/ |
static inline struct fence *fence_get_rcu(struct fence *fence) |
{ |
if (kref_get_unless_zero(&fence->refcount)) |
return fence; |
else |
return NULL; |
} |
/** |
* fence_put - decreases refcount of the fence |
* @fence: [in] fence to reduce refcount of |
*/ |
static inline void fence_put(struct fence *fence) |
{ |
if (fence) |
kref_put(&fence->refcount, fence_release); |
} |
int fence_signal(struct fence *fence); |
int fence_signal_locked(struct fence *fence); |
signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout); |
int fence_add_callback(struct fence *fence, struct fence_cb *cb, |
fence_func_t func); |
bool fence_remove_callback(struct fence *fence, struct fence_cb *cb); |
void fence_enable_sw_signaling(struct fence *fence); |
/** |
* fence_is_signaled_locked - Return an indication if the fence is signaled yet. |
* @fence: [in] the fence to check |
* |
* Returns true if the fence was already signaled, false if not. Since this |
* function doesn't enable signaling, it is not guaranteed to ever return |
* true if fence_add_callback, fence_wait or fence_enable_sw_signaling |
* haven't been called before. |
* |
* This function requires fence->lock to be held. |
*/ |
static inline bool |
fence_is_signaled_locked(struct fence *fence) |
{ |
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
return true; |
if (fence->ops->signaled && fence->ops->signaled(fence)) { |
fence_signal_locked(fence); |
return true; |
} |
return false; |
} |
/** |
* fence_is_signaled - Return an indication if the fence is signaled yet. |
* @fence: [in] the fence to check |
* |
* Returns true if the fence was already signaled, false if not. Since this |
* function doesn't enable signaling, it is not guaranteed to ever return |
* true if fence_add_callback, fence_wait or fence_enable_sw_signaling |
* haven't been called before. |
* |
* It's recommended for seqno fences to call fence_signal when the |
* operation is complete; this makes it possible to prevent issues from |
* wraparound between time of issue and time of use by checking the return |
* value of this function before calling hardware-specific wait instructions. |
*/ |
static inline bool |
fence_is_signaled(struct fence *fence) |
{ |
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) |
return true; |
if (fence->ops->signaled && fence->ops->signaled(fence)) { |
fence_signal(fence); |
return true; |
} |
return false; |
} |
/** |
* fence_later - return the chronologically later fence |
* @f1: [in] the first fence from the same context |
* @f2: [in] the second fence from the same context |
* |
* Returns NULL if both fences are signaled, otherwise the fence that would be |
* signaled last. Both fences must be from the same context, since a seqno is |
* not re-used across contexts. |
*/ |
static inline struct fence *fence_later(struct fence *f1, struct fence *f2) |
{ |
if (WARN_ON(f1->context != f2->context)) |
return NULL; |
/* |
* can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been |
* set if enable_signaling wasn't called, and enabling that here is |
* overkill. |
*/ |
if (f2->seqno - f1->seqno <= INT_MAX) |
return fence_is_signaled(f2) ? NULL : f2; |
else |
return fence_is_signaled(f1) ? NULL : f1; |
} |
signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); |
/** |
* fence_wait - sleep until the fence gets signaled |
* @fence: [in] the fence to wait on |
* @intr: [in] if true, do an interruptible wait |
* |
* This function will return -ERESTARTSYS if interrupted by a signal, |
* or 0 if the fence was signaled. Other error values may be |
* returned on custom implementations. |
* |
* Performs a synchronous wait on this fence. It is assumed the caller |
* directly or indirectly holds a reference to the fence, otherwise the |
* fence might be freed before return, resulting in undefined behavior. |
*/ |
static inline signed long fence_wait(struct fence *fence, bool intr) |
{ |
signed long ret; |
/* Since fence_wait_timeout cannot timeout with |
* MAX_SCHEDULE_TIMEOUT, only valid return values are |
* -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. |
*/ |
ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); |
return ret < 0 ? ret : 0; |
} |
unsigned fence_context_alloc(unsigned num); |
#define FENCE_TRACE(f, fmt, args...) \ |
do { \ |
struct fence *__ff = (f); \ |
} while (0) |
#define FENCE_WARN(f, fmt, args...) \ |
do { \ |
struct fence *__ff = (f); \ |
pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \ |
##args); \ |
} while (0) |
#define FENCE_ERR(f, fmt, args...) \ |
do { \ |
struct fence *__ff = (f); \ |
pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \ |
##args); \ |
} while (0) |
#endif /* __LINUX_FENCE_H */ |
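A minimal sketch of a software-only backend for this API: the names are illustrative, wait is left as fence_default_wait as the header recommends, and release is left NULL so fence_release() falls back to kfree() on the kzalloc'd object. Contexts would normally come from fence_context_alloc().

#include <linux/fence.h>
#include <linux/slab.h>

static DEFINE_SPINLOCK(demo_fence_lock);

static const char *demo_driver_name(struct fence *f)   { return "demo"; }
static const char *demo_timeline_name(struct fence *f) { return "demo-timeline"; }

/* Purely software-signaled: nothing to arm, so just report success. */
static bool demo_enable_signaling(struct fence *f) { return true; }

static const struct fence_ops demo_fence_ops = {
	.get_driver_name   = demo_driver_name,
	.get_timeline_name = demo_timeline_name,
	.enable_signaling  = demo_enable_signaling,
	.wait              = fence_default_wait,	/* must not be NULL */
};

static struct fence *demo_fence_create(unsigned context, unsigned seqno)
{
	struct fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		fence_init(f, &demo_fence_ops, &demo_fence_lock, context, seqno);
	return f;	/* later: fence_signal(f); fence_put(f); */
}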
/drivers/include/linux/gfp.h |
---|
0,0 → 1,239 |
#ifndef __LINUX_GFP_H |
#define __LINUX_GFP_H |
#include <linux/mmdebug.h> |
#include <linux/types.h> |
#include <linux/stddef.h> |
#include <linux/linkage.h> |
struct vm_area_struct; |
/* Plain integer GFP bitmasks. Do not use this directly. */ |
#define ___GFP_DMA 0x01u |
#define ___GFP_HIGHMEM 0x02u |
#define ___GFP_DMA32 0x04u |
#define ___GFP_MOVABLE 0x08u |
#define ___GFP_WAIT 0x10u |
#define ___GFP_HIGH 0x20u |
#define ___GFP_IO 0x40u |
#define ___GFP_FS 0x80u |
#define ___GFP_COLD 0x100u |
#define ___GFP_NOWARN 0x200u |
#define ___GFP_REPEAT 0x400u |
#define ___GFP_NOFAIL 0x800u |
#define ___GFP_NORETRY 0x1000u |
#define ___GFP_MEMALLOC 0x2000u |
#define ___GFP_COMP 0x4000u |
#define ___GFP_ZERO 0x8000u |
#define ___GFP_NOMEMALLOC 0x10000u |
#define ___GFP_HARDWALL 0x20000u |
#define ___GFP_THISNODE 0x40000u |
#define ___GFP_RECLAIMABLE 0x80000u |
#define ___GFP_NOTRACK 0x200000u |
#define ___GFP_NO_KSWAPD 0x400000u |
#define ___GFP_OTHER_NODE 0x800000u |
#define ___GFP_WRITE 0x1000000u |
/* If the above are modified, __GFP_BITS_SHIFT may need updating */ |
/* |
* GFP bitmasks.. |
* |
* Zone modifiers (see linux/mmzone.h - low three bits) |
* |
* Do not put any conditional on these. If necessary modify the definitions |
* without the underscores and use them consistently. The definitions here may |
* be used in bit comparisons. |
*/ |
#define __GFP_DMA ((__force gfp_t)___GFP_DMA) |
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM) |
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32) |
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */ |
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE) |
/* |
* Action modifiers - doesn't change the zoning |
* |
* __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt |
* _might_ fail. This depends upon the particular VM implementation. |
* |
* __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller |
* cannot handle allocation failures. This modifier is deprecated and no new |
* users should be added. |
* |
* __GFP_NORETRY: The VM implementation must not retry indefinitely. |
* |
* __GFP_MOVABLE: Flag that this page will be movable by the page migration |
* mechanism or reclaimed |
*/ |
#define __GFP_WAIT ((__force gfp_t)___GFP_WAIT) /* Can wait and reschedule? */ |
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) /* Should access emergency pools? */ |
#define __GFP_IO ((__force gfp_t)___GFP_IO) /* Can start physical IO? */ |
#define __GFP_FS ((__force gfp_t)___GFP_FS) /* Can call down to low-level FS? */ |
#define __GFP_COLD ((__force gfp_t)___GFP_COLD) /* Cache-cold page required */ |
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN) /* Suppress page allocation failure warning */ |
#define __GFP_REPEAT ((__force gfp_t)___GFP_REPEAT) /* See above */ |
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL) /* See above */ |
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY) /* See above */ |
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)/* Allow access to emergency reserves */ |
#define __GFP_COMP ((__force gfp_t)___GFP_COMP) /* Add compound page metadata */ |
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO) /* Return zeroed page on success */ |
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) /* Don't use emergency reserves. |
* This takes precedence over the |
* __GFP_MEMALLOC flag if both are |
* set |
*/ |
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */ |
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */ |
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */ |
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */ |
#define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) |
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */ |
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */ |
/* |
* This may seem redundant, but it's a way of annotating false positives vs. |
* allocations that simply cannot be supported (e.g. page tables). |
*/ |
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK) |
#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */ |
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) |
/* This equals 0, but use constants in case they ever change */ |
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH) |
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */ |
#define GFP_ATOMIC (__GFP_HIGH) |
#define GFP_NOIO (__GFP_WAIT) |
#define GFP_NOFS (__GFP_WAIT | __GFP_IO) |
#define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS) |
#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ |
__GFP_RECLAIMABLE) |
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) |
#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM) |
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE) |
#define GFP_IOFS (__GFP_IO | __GFP_FS) |
#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ |
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ |
__GFP_NO_KSWAPD) |
/* |
* GFP_THISNODE does not perform any reclaim, you most likely want to |
* use __GFP_THISNODE to allocate from a given node without fallback! |
*/ |
#ifdef CONFIG_NUMA |
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) |
#else |
#define GFP_THISNODE ((__force gfp_t)0) |
#endif |
/* This mask makes up all the page movable related flags */ |
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) |
/* Control page allocator reclaim behavior */ |
#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\ |
__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ |
__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) |
/* Control slab gfp mask during early boot */ |
#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) |
/* Control allocation constraints */ |
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) |
/* Do not use these with a slab allocator */ |
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK) |
/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some |
platforms, used as appropriate on others */ |
#define GFP_DMA __GFP_DMA |
/* 4GB DMA on some platforms */ |
#define GFP_DMA32 __GFP_DMA32 |
#ifdef CONFIG_HIGHMEM |
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM |
#else |
#define OPT_ZONE_HIGHMEM ZONE_NORMAL |
#endif |
#ifdef CONFIG_ZONE_DMA |
#define OPT_ZONE_DMA ZONE_DMA |
#else |
#define OPT_ZONE_DMA ZONE_NORMAL |
#endif |
#ifdef CONFIG_ZONE_DMA32 |
#define OPT_ZONE_DMA32 ZONE_DMA32 |
#else |
#define OPT_ZONE_DMA32 ZONE_NORMAL |
#endif |
/* |
* GFP_ZONE_TABLE is a word size bitstring that is used for looking up the |
* zone to use given the lowest 4 bits of gfp_t. Entries are ZONES_SHIFT long |
* and there are 16 of them to cover all possible combinations of |
* __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM. |
* |
* The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA. |
* But GFP_MOVABLE is not only a zone specifier but also an allocation |
* policy. Therefore __GFP_MOVABLE plus another zone selector is valid. |
* Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1". |
* |
* bit result |
* ================= |
* 0x0 => NORMAL |
* 0x1 => DMA or NORMAL |
* 0x2 => HIGHMEM or NORMAL |
* 0x3 => BAD (DMA+HIGHMEM) |
* 0x4 => DMA32 or DMA or NORMAL |
* 0x5 => BAD (DMA+DMA32) |
* 0x6 => BAD (HIGHMEM+DMA32) |
* 0x7 => BAD (HIGHMEM+DMA32+DMA) |
* 0x8 => NORMAL (MOVABLE+0) |
* 0x9 => DMA or NORMAL (MOVABLE+DMA) |
* 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too) |
* 0xb => BAD (MOVABLE+HIGHMEM+DMA) |
* 0xc => DMA32 (MOVABLE+DMA32) |
* 0xd => BAD (MOVABLE+DMA32+DMA) |
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM) |
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA) |
* |
* ZONES_SHIFT must be <= 2 on 32 bit platforms. |
*/ |
#if 16 * ZONES_SHIFT > BITS_PER_LONG |
#error ZONES_SHIFT too large to create GFP_ZONE_TABLE integer |
#endif |
#define GFP_ZONE_TABLE ( \ |
(ZONE_NORMAL << 0 * ZONES_SHIFT) \ |
| (OPT_ZONE_DMA << ___GFP_DMA * ZONES_SHIFT) \ |
| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * ZONES_SHIFT) \ |
| (OPT_ZONE_DMA32 << ___GFP_DMA32 * ZONES_SHIFT) \ |
| (ZONE_NORMAL << ___GFP_MOVABLE * ZONES_SHIFT) \ |
| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * ZONES_SHIFT) \ |
| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * ZONES_SHIFT) \ |
| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * ZONES_SHIFT) \ |
) |
/* |
* GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32 |
* __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per |
* entry starting with bit 0. Bit is set if the combination is not |
* allowed. |
*/ |
#define GFP_ZONE_BAD ( \ |
1 << (___GFP_DMA | ___GFP_HIGHMEM) \ |
| 1 << (___GFP_DMA | ___GFP_DMA32) \ |
| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \ |
| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \ |
| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \ |
| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \ |
| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \ |
| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \ |
) |
#endif /* __LINUX_GFP_H */ |
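GFP_ZONE_TABLE and GFP_ZONE_BAD are consumed by gfp_zone(), which indexes the table with the low zone bits of the mask; this is the upstream lookup the constants above are sized for (assuming VM_BUG_ON is available):

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	/* each table entry is ZONES_SHIFT bits wide */
	z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
					 ((1 << ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);	/* reject the BAD combinations */
	return z;
}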
/drivers/include/linux/hash.h |
---|
36,6 → 36,9 |
{ |
u64 hash = val; |
#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 |
hash = hash * GOLDEN_RATIO_PRIME_64; |
#else |
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */ |
u64 n = hash; |
n <<= 18; |
50,6 → 53,7 |
hash += n; |
n <<= 2; |
hash += n; |
#endif |
/* High bits are more random, so use them. */ |
return hash >> (64 - bits); |
78,4 → 82,5 |
#endif |
return (u32)val; |
} |
#endif /* _LINUX_HASH_H */ |
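hash_64() folds the multiplicative hash down to @bits, so the result can index a power-of-two table directly. An illustrative bucket lookup (table name and size are made up):

#include <linux/hash.h>
#include <linux/list.h>

#define DEMO_TABLE_BITS 8
static struct hlist_head demo_table[1 << DEMO_TABLE_BITS];

static struct hlist_head *demo_bucket(u64 key)
{
	/* the high product bits are the most random; hash_64 shifts them down */
	return &demo_table[hash_64(key, DEMO_TABLE_BITS)];
}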
/drivers/include/linux/hdmi.h |
---|
1,9 → 1,24 |
/* |
* Copyright (C) 2012 Avionic Design GmbH |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License version 2 as |
* published by the Free Software Foundation. |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sub license, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef __LINUX_HDMI_H_ |
/drivers/include/linux/i2c.h |
---|
31,6 → 31,9 |
#include <linux/module.h> |
#include <linux/i2c-id.h> |
#include <linux/mod_devicetable.h> |
#include <linux/sched.h> /* for completion */ |
#include <linux/mutex.h> |
#include <linux/jiffies.h> |
extern struct bus_type i2c_bus_type; |
extern struct device_type i2c_adapter_type; |
139,6 → 142,8 |
* @irq: indicates the IRQ generated by this device (if any) |
* @detected: member of an i2c_driver.clients list or i2c-core's |
* userspace_devices list |
* @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter |
* calls it to pass on slave events to the slave driver. |
* |
* An i2c_client identifies a single device (i.e. chip) connected to an |
* i2c bus. The behaviour exposed to Linux is defined by the driver |
160,6 → 165,13 |
extern struct i2c_client *i2c_verify_client(struct device *dev); |
extern struct i2c_adapter *i2c_verify_adapter(struct device *dev); |
enum i2c_slave_event { |
I2C_SLAVE_REQ_READ_START, |
I2C_SLAVE_REQ_READ_END, |
I2C_SLAVE_REQ_WRITE_START, |
I2C_SLAVE_REQ_WRITE_END, |
I2C_SLAVE_STOP, |
}; |
/** |
* struct i2c_board_info - template for device creation |
* @type: chip type, to initialize i2c_client.name |
210,7 → 222,7 |
* to name two of the most common. |
* |
* The return codes from the @master_xfer field should indicate the type of |
* error code that occured during the transfer, as documented in the kernel |
* error code that occurred during the transfer, as documented in the kernel |
* Documentation file Documentation/i2c/fault-codes. |
*/ |
struct i2c_algorithm { |
230,6 → 242,12 |
u32 (*functionality) (struct i2c_adapter *); |
}; |
int i2c_recover_bus(struct i2c_adapter *adap); |
/* Generic recovery routines */ |
int i2c_generic_gpio_recovery(struct i2c_adapter *adap); |
int i2c_generic_scl_recovery(struct i2c_adapter *adap); |
/* |
* i2c_adapter is the structure used to identify a physical i2c bus along |
* with the access algorithms necessary to access it. |
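The i2c_slave_event values added above are delivered to a per-client callback; in upstream kernels of this era the pairing is an i2c_slave_cb_t handler registered with i2c_slave_register() (assumed here; that declaration is not part of this hunk). A sketch of such a handler:

#include <linux/i2c.h>

static int demo_slave_cb(struct i2c_client *client,
			 enum i2c_slave_event event, u8 *val)
{
	switch (event) {
	case I2C_SLAVE_REQ_WRITE_END:
		/* the master wrote a byte to us; *val holds it */
		break;
	case I2C_SLAVE_REQ_READ_START:
		*val = 0x42;	/* byte handed back to the master (illustrative) */
		break;
	default:
		break;
	}
	return 0;
}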
/drivers/include/linux/idr.h |
---|
14,15 → 14,10 |
#include <syscall.h> |
#include <linux/types.h> |
#include <errno-base.h> |
#include <linux/bitops.h> |
//#include <linux/init.h> |
//#include <linux/rcupdate.h> |
#include <linux/spinlock.h> |
#include <linux/bitmap.h> |
#include <linux/bug.h> |
#include <linux/rcupdate.h> |
/* |
* We want shallower trees and thus more bits covered at each layer. 8 |
* bits gives us large enough first layer for most use cases and maximum |
/drivers/include/linux/irqflags.h |
---|
0,0 → 1,150 |
/* |
* include/linux/irqflags.h |
* |
* IRQ flags tracing: follow the state of the hardirq and softirq flags and |
* provide callbacks for transitions between ON and OFF states. |
* |
* This file gets included from lowlevel asm headers too, to provide |
* wrapped versions of the local_irq_*() APIs, based on the |
* raw_local_irq_*() macros from the lowlevel headers. |
*/ |
#ifndef _LINUX_TRACE_IRQFLAGS_H |
#define _LINUX_TRACE_IRQFLAGS_H |
#include <linux/typecheck.h> |
#include <asm/irqflags.h> |
#ifdef CONFIG_TRACE_IRQFLAGS |
extern void trace_softirqs_on(unsigned long ip); |
extern void trace_softirqs_off(unsigned long ip); |
extern void trace_hardirqs_on(void); |
extern void trace_hardirqs_off(void); |
# define trace_hardirq_context(p) ((p)->hardirq_context) |
# define trace_softirq_context(p) ((p)->softirq_context) |
# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled) |
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) |
# define trace_hardirq_enter() do { current->hardirq_context++; } while (0) |
# define trace_hardirq_exit() do { current->hardirq_context--; } while (0) |
# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) |
# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) |
# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, |
#else |
# define trace_hardirqs_on() do { } while (0) |
# define trace_hardirqs_off() do { } while (0) |
# define trace_softirqs_on(ip) do { } while (0) |
# define trace_softirqs_off(ip) do { } while (0) |
# define trace_hardirq_context(p) 0 |
# define trace_softirq_context(p) 0 |
# define trace_hardirqs_enabled(p) 0 |
# define trace_softirqs_enabled(p) 0 |
# define trace_hardirq_enter() do { } while (0) |
# define trace_hardirq_exit() do { } while (0) |
# define lockdep_softirq_enter() do { } while (0) |
# define lockdep_softirq_exit() do { } while (0) |
# define INIT_TRACE_IRQFLAGS |
#endif |
#if defined(CONFIG_IRQSOFF_TRACER) || \ |
defined(CONFIG_PREEMPT_TRACER) |
extern void stop_critical_timings(void); |
extern void start_critical_timings(void); |
#else |
# define stop_critical_timings() do { } while (0) |
# define start_critical_timings() do { } while (0) |
#endif |
/* |
* Wrap the arch provided IRQ routines to provide appropriate checks. |
*/ |
#define raw_local_irq_disable() arch_local_irq_disable() |
#define raw_local_irq_enable() arch_local_irq_enable() |
#define raw_local_irq_save(flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = arch_local_irq_save(); \ |
} while (0) |
#define raw_local_irq_restore(flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
arch_local_irq_restore(flags); \ |
} while (0) |
#define raw_local_save_flags(flags) \ |
do { \ |
typecheck(unsigned long, flags); \ |
flags = arch_local_save_flags(); \ |
} while (0) |
#define raw_irqs_disabled_flags(flags) \ |
({ \ |
typecheck(unsigned long, flags); \ |
arch_irqs_disabled_flags(flags); \ |
}) |
#define raw_irqs_disabled() (arch_irqs_disabled()) |
#define raw_safe_halt() arch_safe_halt() |
/* |
* The local_irq_*() APIs are equal to the raw_local_irq*() |
* if !TRACE_IRQFLAGS. |
*/ |
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT |
#define local_irq_enable() \ |
do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) |
#define local_irq_disable() \ |
do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) |
#define local_irq_save(flags) \ |
do { \ |
raw_local_irq_save(flags); \ |
trace_hardirqs_off(); \ |
} while (0) |
#define local_irq_restore(flags) \ |
do { \ |
if (raw_irqs_disabled_flags(flags)) { \ |
raw_local_irq_restore(flags); \ |
trace_hardirqs_off(); \ |
} else { \ |
trace_hardirqs_on(); \ |
raw_local_irq_restore(flags); \ |
} \ |
} while (0) |
#define local_save_flags(flags) \ |
do { \ |
raw_local_save_flags(flags); \ |
} while (0) |
#define irqs_disabled_flags(flags) \ |
({ \ |
raw_irqs_disabled_flags(flags); \ |
}) |
#define irqs_disabled() \ |
({ \ |
unsigned long _flags; \ |
raw_local_save_flags(_flags); \ |
raw_irqs_disabled_flags(_flags); \ |
}) |
#define safe_halt() \ |
do { \ |
trace_hardirqs_on(); \ |
raw_safe_halt(); \ |
} while (0) |
#else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ |
#define local_irq_enable() do { raw_local_irq_enable(); } while (0) |
#define local_irq_disable() do { raw_local_irq_disable(); } while (0) |
#define local_irq_save(flags) \ |
do { \ |
raw_local_irq_save(flags); \ |
} while (0) |
#define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) |
#define local_save_flags(flags) do { raw_local_save_flags(flags); } while (0) |
#define irqs_disabled() (raw_irqs_disabled()) |
#define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags)) |
#define safe_halt() do { raw_safe_halt(); } while (0) |
#endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ |
#endif |
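The wrapped helpers keep the usual save/restore discipline, and typecheck() forces flags to be an unsigned long in the caller. The canonical critical section looks like this:

#include <linux/irqflags.h>

static unsigned int counter;

static void bump_counter(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable and record the previous state */
	counter++;			/* safe against local interrupts */
	local_irq_restore(flags);	/* re-enable only if it was enabled before */
}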
/drivers/include/linux/jiffies.h |
---|
77,8 → 77,8 |
* without sampling the sequence number in jiffies_lock. |
* get_jiffies_64() will do this for you as appropriate. |
*/ |
extern u64 jiffies_64; |
extern unsigned long volatile jiffies; |
extern u64 __jiffy_data jiffies_64; |
extern unsigned long volatile __jiffy_data jiffies; |
#if (BITS_PER_LONG < 64) |
u64 get_jiffies_64(void); |
262,24 → 262,12 |
#define SEC_JIFFIE_SC (32 - SHIFT_HZ) |
#endif |
#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29) |
#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19) |
#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\ |
TICK_NSEC -1) / (u64)TICK_NSEC)) |
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\ |
TICK_NSEC -1) / (u64)TICK_NSEC)) |
#define USEC_CONVERSION \ |
((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\ |
TICK_NSEC -1) / (u64)TICK_NSEC)) |
/* |
* USEC_ROUND is used in the timeval to jiffie conversion. See there |
* for more details. It is the scaled resolution rounding value. Note |
* that it is a 64-bit value. Since, when it is applied, we are already |
* in jiffies (albeit scaled), it is nothing but the bits we will shift |
* off. |
*/ |
#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1) |
/* |
* The maximum jiffie value is (MAX_INT >> 1). Here we translate that |
* into seconds. The 64-bit case will overflow if we are not careful, |
* so use the messy SH_DIV macro to do it. Still all constants. |
325,35 → 313,6 |
extern u64 nsecs_to_jiffies64(u64 n); |
extern unsigned long nsecs_to_jiffies(u64 n); |
static unsigned long round_jiffies_common(unsigned long j, bool force_up) |
{ |
int rem; |
unsigned long original = j; |
rem = j % HZ; |
/* |
* If the target jiffie is just after a whole second (which can happen |
* due to delays of the timer irq, long irq off times, etc.) then |
* we should round down to the whole second, not up. Use 1/4th second |
* as cutoff for this rounding as an extreme upper bound for this. |
* But never round down if @force_up is set. |
*/ |
if (rem < HZ/4 && !force_up) /* round down */ |
j = j - rem; |
else /* round up */ |
j = j - rem + HZ; |
if (j <= GetTimerTicks()) /* rounding ate our timeout entirely; */ |
return original; |
return j; |
} |
unsigned long round_jiffies_up_relative(unsigned long j); |
#define TIMESTAMP_SIZE 30 |
#endif |
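round_jiffies_common() snaps a timeout to a whole-second boundary, rounding down when the target is less than HZ/4 past a second, so unrelated timers tend to expire together; in this port the "already expired" check uses GetTimerTicks(). Illustrative arithmetic, assuming HZ = 100 and results still in the future:

#include <linux/jiffies.h>

static void demo_rounding(void)
{
	unsigned long a = round_jiffies_common(510, false); /* rem 10 < 25  -> 500 */
	unsigned long b = round_jiffies_common(560, false); /* rem 60 >= 25 -> 600 */
	unsigned long c = round_jiffies_common(510, true);  /* force_up     -> 600 */

	(void)a; (void)b; (void)c;
}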
/drivers/include/linux/kernel.h |
---|
1,22 → 1,19 |
#ifndef _LINUX_KERNEL_H |
#define _LINUX_KERNEL_H |
/* |
* 'kernel.h' contains some often-used function prototypes etc |
*/ |
#ifdef __KERNEL__ |
#include <stdarg.h> |
#include <linux/linkage.h> |
#include <linux/stddef.h> |
#include <linux/types.h> |
#include <linux/compiler.h> |
#include <linux/bitops.h> |
#include <linux/errno.h> |
#include <linux/log2.h> |
#include <linux/typecheck.h> |
#include <linux/printk.h> |
#include <asm/byteorder.h> |
#include <uapi/linux/kernel.h> |
#define __init |
#define USHRT_MAX ((u16)(~0U)) |
#define SHRT_MAX ((s16)(USHRT_MAX>>1)) |
#define SHRT_MIN ((s16)(-SHRT_MAX - 1)) |
44,8 → 41,12 |
#define S64_MAX ((s64)(U64_MAX>>1)) |
#define S64_MIN ((s64)(-S64_MAX - 1)) |
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) |
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) |
#define STACK_MAGIC 0xdeadbeef |
#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) |
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) |
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask)) |
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) |
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) |
114,14 → 115,23 |
} \ |
) |
#define clamp_t(type, val, min, max) ({ \ |
type __val = (val); \ |
type __min = (min); \ |
type __max = (max); \ |
__val = __val < __min ? __min: __val; \ |
__val > __max ? __max: __val; }) |
#define _RET_IP_ (unsigned long)__builtin_return_address(0) |
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) |
#ifdef CONFIG_LBDAF |
# include <asm/div64.h> |
# define sector_div(a, b) do_div(a, b) |
#else |
# define sector_div(n, b)( \ |
{ \ |
int _res; \ |
_res = (n) % (b); \ |
(n) /= (b); \ |
_res; \ |
} \ |
) |
#endif |
/** |
* upper_32_bits - return bits 32-63 of a number |
140,6 → 150,23 |
#define lower_32_bits(n) ((u32)(n)) |
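upper_32_bits()/lower_32_bits() are the standard way to split a 64-bit value across a pair of 32-bit device registers; a hedged sketch (register offsets are made up):

static void write_dma_addr(void __iomem *mmio, u64 addr)
{
	writel(lower_32_bits(addr), mmio + 0x00);	/* ADDR_LO (illustrative) */
	writel(upper_32_bits(addr), mmio + 0x04);	/* ADDR_HI (illustrative) */
}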
/* |
* abs() handles unsigned and signed longs, ints, shorts and chars. For all |
* input types abs() returns a signed long. |
* abs() should not be used for 64-bit types (s64, u64, long long) - use abs64() |
* for those. |
*/ |
#define abs(x) ({ \ |
long ret; \ |
if (sizeof(x) == sizeof(long)) { \ |
long __x = (x); \ |
ret = (__x < 0) ? -__x : __x; \ |
} else { \ |
int __x = (x); \ |
ret = (__x < 0) ? -__x : __x; \ |
} \ |
ret; \ |
}) |
#define abs64(x) ({ \ |
s64 __x = (x); \ |
154,11 → 181,60 |
#define KERN_NOTICE "<5>" /* normal but significant condition */ |
#define KERN_INFO "<6>" /* informational */ |
#define KERN_DEBUG "<7>" /* debug-level messages */ |
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...); |
extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list); |
extern __printf(3, 4) |
int snprintf(char *buf, size_t size, const char *fmt, ...); |
extern __printf(3, 0) |
int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); |
extern __printf(3, 4) |
int scnprintf(char *buf, size_t size, const char *fmt, ...); |
extern __printf(3, 0) |
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); |
extern __printf(2, 3) |
char *kasprintf(gfp_t gfp, const char *fmt, ...); |
extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); |
enum lockdep_ok { |
LOCKDEP_STILL_OK, |
LOCKDEP_NOW_UNRELIABLE |
}; |
extern void add_taint(unsigned flag, enum lockdep_ok); |
extern int test_taint(unsigned flag); |
extern unsigned long get_taint(void); |
extern int root_mountflags; |
extern bool early_boot_irqs_disabled; |
/* Values used for system_state */ |
extern enum system_states { |
SYSTEM_BOOTING, |
SYSTEM_RUNNING, |
SYSTEM_HALT, |
SYSTEM_POWER_OFF, |
SYSTEM_RESTART, |
} system_state; |
#define TAINT_PROPRIETARY_MODULE 0 |
#define TAINT_FORCED_MODULE 1 |
#define TAINT_CPU_OUT_OF_SPEC 2 |
#define TAINT_FORCED_RMMOD 3 |
#define TAINT_MACHINE_CHECK 4 |
#define TAINT_BAD_PAGE 5 |
#define TAINT_USER 6 |
#define TAINT_DIE 7 |
#define TAINT_OVERRIDDEN_ACPI_TABLE 8 |
#define TAINT_WARN 9 |
#define TAINT_CRAP 10 |
#define TAINT_FIRMWARE_WORKAROUND 11 |
#define TAINT_OOT_MODULE 12 |
#define TAINT_UNSIGNED_MODULE 13 |
#define TAINT_SOFTLOCKUP 14 |
extern const char hex_asc[]; |
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)] |
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4] |
static inline char *pack_hex_byte(char *buf, u8 byte) |
static inline char *hex_byte_pack(char *buf, u8 byte) |
{ |
*buf++ = hex_asc_hi(byte); |
*buf++ = hex_asc_lo(byte); |
165,25 → 241,223 |
return buf; |
} |
enum { |
DUMP_PREFIX_NONE, |
DUMP_PREFIX_ADDRESS, |
DUMP_PREFIX_OFFSET |
extern const char hex_asc_upper[]; |
#define hex_asc_upper_lo(x) hex_asc_upper[((x) & 0x0f)] |
#define hex_asc_upper_hi(x) hex_asc_upper[((x) & 0xf0) >> 4] |
static inline char *hex_byte_pack_upper(char *buf, u8 byte) |
{ |
*buf++ = hex_asc_upper_hi(byte); |
*buf++ = hex_asc_upper_lo(byte); |
return buf; |
} |
extern int hex_to_bin(char ch); |
extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); |
extern char *bin2hex(char *dst, const void *src, size_t count); |
bool mac_pton(const char *s, u8 *mac); |
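hex_byte_pack() emits two ASCII nibbles per byte and no terminator, which is why callers size buffers as 2 * len (+1 if they NUL-terminate themselves). A small sketch:

static void fmt_id(u8 id, char out[3])
{
	char *p = hex_byte_pack(out, id);	/* writes two chars, returns past them */

	*p = '\0';
}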
/* |
* General tracing related utility functions - trace_printk(), |
* tracing_on/tracing_off and tracing_start()/tracing_stop |
* |
* Use tracing_on/tracing_off when you want to quickly turn on or off |
* tracing. It simply enables or disables the recording of the trace events. |
* This also corresponds to the user space /sys/kernel/debug/tracing/tracing_on |
* file, which gives a means for the kernel and userspace to interact. |
* Place a tracing_off() in the kernel where you want tracing to end. |
* From user space, examine the trace, and then echo 1 > tracing_on |
* to continue tracing. |
* |
* tracing_stop/tracing_start has slightly more overhead. It is used |
* by things like suspend to ram where disabling the recording of the |
* trace is not enough, but tracing must actually stop because things |
* like calling smp_processor_id() may crash the system. |
* |
* Most likely, you want to use tracing_on/tracing_off. |
*/ |
#ifdef CONFIG_RING_BUFFER |
/* trace_off_permanent stops recording with no way to bring it back */ |
void tracing_off_permanent(void); |
#else |
static inline void tracing_off_permanent(void) { } |
#endif |
enum ftrace_dump_mode { |
DUMP_NONE, |
DUMP_ALL, |
DUMP_ORIG, |
}; |
int hex_to_bin(char ch); |
int hex2bin(u8 *dst, const char *src, size_t count); |
#ifdef CONFIG_TRACING |
void tracing_on(void); |
void tracing_off(void); |
int tracing_is_on(void); |
void tracing_snapshot(void); |
void tracing_snapshot_alloc(void); |
extern void tracing_start(void); |
extern void tracing_stop(void); |
//int printk(const char *fmt, ...); |
static inline __printf(1, 2) |
void ____trace_printk_check_format(const char *fmt, ...) |
{ |
} |
#define __trace_printk_check_format(fmt, args...) \ |
do { \ |
if (0) \ |
____trace_printk_check_format(fmt, ##args); \ |
} while (0) |
#define printk(fmt, arg...) dbgprintf(fmt , ##arg) |
/** |
* trace_printk - printf formatting in the ftrace buffer |
* @fmt: the printf format for printing |
* |
* Note: __trace_printk is an internal function for trace_printk and |
* the @ip is passed in via the trace_printk macro. |
* |
* This function allows a kernel developer to debug fast path sections |
* that printk is not appropriate for. By scattering in various |
* printk-like tracing in the code, a developer can quickly see |
* where problems are occurring. |
* |
* This is intended as a debugging tool for the developer only. |
* Please refrain from leaving trace_printks scattered around in |
* your code. (Extra memory is used for special buffers that are |
* allocated when trace_printk() is used) |
* |
* A little optimization trick is done here. If there's only one |
* argument, there's no need to scan the string for printf formats. |
* The trace_puts() will suffice. But how can we take advantage of |
* using trace_puts() when trace_printk() has only one argument? |
* By stringifying the args and checking the size we can tell |
* whether or not there are args. __stringify((__VA_ARGS__)) will |
* turn into "()\0" with a size of 3 when there are no args, anything |
* else will be bigger. All we need to do is define a string to this, |
* and then take its size and compare to 3. If it's bigger, use |
* do_trace_printk() otherwise, optimize it to trace_puts(). Then just |
* let gcc optimize the rest. |
*/ |
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...); |
#define trace_printk(fmt, ...) \ |
do { \ |
char _______STR[] = __stringify((__VA_ARGS__)); \ |
if (sizeof(_______STR) > 3) \ |
do_trace_printk(fmt, ##__VA_ARGS__); \ |
else \ |
trace_puts(fmt); \ |
} while (0) |
#define do_trace_printk(fmt, args...) \ |
do { \ |
static const char *trace_printk_fmt \ |
__attribute__((section("__trace_printk_fmt"))) = \ |
__builtin_constant_p(fmt) ? fmt : NULL; \ |
\ |
__trace_printk_check_format(fmt, ##args); \ |
\ |
if (__builtin_constant_p(fmt)) \ |
__trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \ |
else \ |
__trace_printk(_THIS_IP_, fmt, ##args); \ |
} while (0) |
extern __printf(2, 3) |
char *kasprintf(gfp_t gfp, const char *fmt, ...); |
int __trace_bprintk(unsigned long ip, const char *fmt, ...); |
extern __printf(2, 3) |
int __trace_printk(unsigned long ip, const char *fmt, ...); |
/** |
* trace_puts - write a string into the ftrace buffer |
* @str: the string to record |
* |
* Note: __trace_bputs is an internal function for trace_puts and |
* the @ip is passed in via the trace_puts macro. |
* |
* This is similar to trace_printk() but is made for those really fast |
* paths that a developer wants the least amount of "Heisenbug" effects, |
* where the processing of the print format is still too much. |
* |
* This function allows a kernel developer to debug fast path sections |
* that printk is not appropriate for. By scattering in various |
* printk-like tracing in the code, a developer can quickly see |
* where problems are occurring. |
* |
* This is intended as a debugging tool for the developer only. |
* Please refrain from leaving trace_puts scattered around in |
* your code. (Extra memory is used for special buffers that are |
* allocated when trace_puts() is used) |
* |
* Returns: 0 if nothing was written, positive # if string was. |
* (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) |
*/ |
#define trace_puts(str) ({ \ |
static const char *trace_printk_fmt \ |
__attribute__((section("__trace_printk_fmt"))) = \ |
__builtin_constant_p(str) ? str : NULL; \ |
\ |
if (__builtin_constant_p(str)) \ |
__trace_bputs(_THIS_IP_, trace_printk_fmt); \ |
else \ |
__trace_puts(_THIS_IP_, str, strlen(str)); \ |
}) |
extern int __trace_bputs(unsigned long ip, const char *str); |
extern int __trace_puts(unsigned long ip, const char *str, int size); |
extern void trace_dump_stack(int skip); |
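The single-argument optimization described above means a bare string compiles down to the trace_puts() path, while any formatted call goes through do_trace_printk(). Illustrative:

static void demo_trace(unsigned seq, unsigned ctx)
{
	trace_printk("entered fastpath\n");		/* no varargs: trace_puts() path */
	trace_printk("seq=%u ctx=%u\n", seq, ctx);	/* varargs: do_trace_printk() */
}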
/* |
* The double __builtin_constant_p is because gcc will give us an error |
* if we try to allocate the static variable to fmt if it is not a |
* constant. Even with the outer if statement. |
*/ |
#define ftrace_vprintk(fmt, vargs) \ |
do { \ |
if (__builtin_constant_p(fmt)) { \ |
static const char *trace_printk_fmt \ |
__attribute__((section("__trace_printk_fmt"))) = \ |
__builtin_constant_p(fmt) ? fmt : NULL; \ |
\ |
__ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \ |
} else \ |
__ftrace_vprintk(_THIS_IP_, fmt, vargs); \ |
} while (0) |
extern int |
__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); |
extern int |
__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); |
extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); |
#else |
static inline void tracing_start(void) { } |
static inline void tracing_stop(void) { } |
static inline void trace_dump_stack(int skip) { } |
static inline void tracing_on(void) { } |
static inline void tracing_off(void) { } |
static inline int tracing_is_on(void) { return 0; } |
static inline void tracing_snapshot(void) { } |
static inline void tracing_snapshot_alloc(void) { } |
static inline __printf(1, 2) |
int trace_printk(const char *fmt, ...) |
{ |
return 0; |
} |
static inline int |
ftrace_vprintk(const char *fmt, va_list ap) |
{ |
return 0; |
} |
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } |
#endif /* CONFIG_TRACING */ |
/* |
* min()/max()/clamp() macros that also do |
* strict type-checking.. See the |
* "unnecessary" pointer comparison. |
200,24 → 474,9 |
(void) (&_max1 == &_max2); \ |
_max1 > _max2 ? _max1 : _max2; }) |
#define min3(x, y, z) ({ \ |
typeof(x) _min1 = (x); \ |
typeof(y) _min2 = (y); \ |
typeof(z) _min3 = (z); \ |
(void) (&_min1 == &_min2); \ |
(void) (&_min1 == &_min3); \ |
_min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \ |
(_min2 < _min3 ? _min2 : _min3); }) |
#define min3(x, y, z) min((typeof(x))min(x, y), z) |
#define max3(x, y, z) max((typeof(x))max(x, y), z) |
#define max3(x, y, z) ({ \ |
typeof(x) _max1 = (x); \ |
typeof(y) _max2 = (y); \ |
typeof(z) _max3 = (z); \ |
(void) (&_max1 == &_max2); \ |
(void) (&_max1 == &_max3); \ |
_max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \ |
(_max2 > _max3 ? _max2 : _max3); }) |
/** |
* min_not_zero - return the minimum that is _not_ zero, unless both are zero |
* @x: value1 |
231,20 → 490,13 |
/** |
* clamp - return a value clamped to a given range with strict typechecking |
* @val: current value |
* @min: minimum allowable value |
* @max: maximum allowable value |
* @lo: lowest allowable value |
* @hi: highest allowable value |
* |
* This macro does strict typechecking of min/max to make sure they are of the |
* This macro does strict typechecking of lo/hi to make sure they are of the |
* same type as val. See the unnecessary pointer comparisons. |
*/ |
#define clamp(val, min, max) ({ \ |
typeof(val) __val = (val); \ |
typeof(min) __min = (min); \ |
typeof(max) __max = (max); \ |
(void) (&__val == &__min); \ |
(void) (&__val == &__max); \ |
__val = __val < __min ? __min: __val; \ |
__val > __max ? __max: __val; }) |
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) |
/* |
* ..and if you can't take the strict |
263,6 → 515,38 |
__max1 > __max2 ? __max1: __max2; }) |
/** |
* clamp_t - return a value clamped to a given range using a given type |
* @type: the type of variable to use |
* @val: current value |
* @lo: minimum allowable value |
* @hi: maximum allowable value |
* |
* This macro does no typechecking and uses temporary variables of type |
* 'type' to make all the comparisons. |
*/ |
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) |
/** |
* clamp_val - return a value clamped to a given range using val's type |
* @val: current value |
* @lo: minimum allowable value |
* @hi: maximum allowable value |
* |
* This macro does no typechecking and uses temporary variables of whatever |
* type the input argument 'val' is. This is useful when val is an unsigned |
* type and lo and hi are literals that will otherwise be assigned a signed |
* integer type. |
*/ |
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) |
/* |
* swap - swap value of @a and @b |
*/ |
#define swap(a, b) \ |
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) |
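clamp() now reduces to nested min()/max() of one type, clamp_val() compares in val's own type, and swap() works on any assignable lvalues. A quick illustration:

#include <linux/kernel.h>

static void demo_clamp_swap(void)
{
	int raw = -7;
	u8 level = 200;
	int a = 1, b = 2;

	raw = clamp(raw, 0, 100);		/* all three are int -> 0 */
	level = clamp_val(level, 16, 128);	/* compares as u8 -> 128 */
	swap(a, b);				/* a == 2, b == 1 */
	(void)raw; (void)level; (void)a; (void)b;
}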
/** |
* container_of - cast a member of a structure out to the containing structure |
* @ptr: the pointer to the member. |
* @type: the type of the container struct this is embedded in. |
273,22 → 557,28 |
const typeof( ((type *)0)->member ) *__mptr = (ptr); \ |
(type *)( (char *)__mptr - offsetof(type,member) );}) |
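A minimal usage sketch of container_of(); struct packet and its member names are hypothetical:

    struct packet {
            int id;
            struct list_head node;      /* embedded member */
    };

    /* Given a pointer to the embedded member, recover its container: */
    static struct packet *node_to_packet(struct list_head *n)
    {
            return container_of(n, struct packet, node);
    }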
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */ |
#ifdef CONFIG_FTRACE_MCOUNT_RECORD |
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD |
#endif |
static inline void *kcalloc(size_t n, size_t size, uint32_t flags) |
{ |
if (n != 0 && size > ULONG_MAX / n) |
return NULL; |
return kzalloc(n * size, 0); |
} |
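The guard in kcalloc() rejects any n * size product that would wrap; a worked example with hypothetical 32-bit values:

    static void *alloc_table(void)
    {
            /* With 32-bit size_t, ULONG_MAX / 0x10000 == 0xffff.  Here
             * size (0x10001) > 0xffff, so kcalloc() returns NULL rather
             * than letting n * size wrap to 0x10000 bytes and handing
             * back a dangerously short buffer. */
            return kcalloc(0x10000, 0x10001, 0);    /* NULL by design */
    }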
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */ |
#define VERIFY_OCTAL_PERMISSIONS(perms) \ |
(BUILD_BUG_ON_ZERO((perms) < 0) + \ |
BUILD_BUG_ON_ZERO((perms) > 0777) + \ |
/* User perms >= group perms >= other perms */ \ |
BUILD_BUG_ON_ZERO(((perms) >> 6) < (((perms) >> 3) & 7)) + \ |
BUILD_BUG_ON_ZERO((((perms) >> 3) & 7) < ((perms) & 7)) + \ |
/* Other writable? Generally considered a bad idea. */ \ |
BUILD_BUG_ON_ZERO((perms) & 2) + \ |
(perms)) |
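What the build-time checks accept and reject, sketched with literal modes (the variable is hypothetical, and the usual sizeof-based BUILD_BUG_ON_ZERO() definition is assumed):

    /* 0644 (rw-r--r--) passes every check and evaluates to itself: */
    static const unsigned int sysfs_mode = VERIFY_OCTAL_PERMISSIONS(0644);
    /* Rejected at build time: 644 (decimal == 01204 octal, > 0777) and
     * 0666 (other-writable) both trip BUILD_BUG_ON_ZERO(). */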
void free(void *ptr); |
#endif /* __KERNEL__ */ |
typedef unsigned long pgprotval_t; |
typedef struct pgprot { pgprotval_t pgprot; } pgprot_t; |
struct file |
{ |
352,17 → 642,7 |
# define del_timer_sync(t) del_timer(t) |
struct timespec { |
long tv_sec; /* seconds */ |
long tv_nsec; /* nanoseconds */ |
}; |
#define mb() asm volatile("mfence" : : : "memory") |
#define rmb() asm volatile("lfence" : : : "memory") |
#define wmb() asm volatile("sfence" : : : "memory") |
#define build_mmio_read(name, size, type, reg, barrier) \ |
static inline type name(const volatile void __iomem *addr) \ |
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \ |
400,23 → 680,6 |
#define __raw_writew __writew |
#define __raw_writel __writel |
static inline __u64 readq(const volatile void __iomem *addr) |
{ |
const volatile u32 __iomem *p = addr; |
u32 low, high; |
low = readl(p); |
high = readl(p + 1); |
return low + ((u64)high << 32); |
} |
static inline void writeq(__u64 val, volatile void __iomem *addr) |
{ |
writel(val, addr); |
writel(val >> 32, addr+4); |
} |
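These helpers synthesize a 64-bit access from two 32-bit ones, so they are not atomic: the device can observe (or change) state between the halves, and writeq() lands the low half first. A sketch with a hypothetical register offset:

    static u64 snapshot_counter(void __iomem *regs)
    {
            /* Reads the low dword, then the high dword -- fine for
             * registers that latch on the low-half read, racy otherwise. */
            return readq(regs + 0x10);
    }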
#define swap(a, b) \ |
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) |
432,9 → 695,6 |
#define dev_info(dev, format, arg...) \ |
printk("Info %s " format , __func__, ## arg) |
//#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) |
#define BUILD_BUG_ON(condition) |
struct page |
{ |
unsigned int addr; |
467,8 → 727,6 |
#define get_page(a) |
#define put_page(a) |
#define set_pages_uc(a,b) |
#define set_pages_wb(a,b) |
#define pci_map_page(dev, page, offset, size, direction) \ |
(dma_addr_t)( (offset)+page_to_phys(page)) |
475,36 → 733,31 |
#define pci_unmap_page(dev, dma_address, size, direction) |
#define GFP_TEMPORARY 0 |
#define __GFP_NOWARN 0 |
#define __GFP_NORETRY 0 |
#define GFP_NOWAIT 0 |
#define IS_ENABLED(a) 0 |
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) |
#define RCU_INIT_POINTER(p, v) \ |
do { \ |
p = (typeof(*v) __force __rcu *)(v); \ |
} while (0) |
//#define RCU_INIT_POINTER(p, v) \ |
// do { \ |
// p = (typeof(*v) __force __rcu *)(v); \ |
// } while (0) |
#define rcu_dereference_raw(p) ({ \ |
typeof(p) _________p1 = ACCESS_ONCE(p); \ |
(_________p1); \ |
}) |
#define rcu_assign_pointer(p, v) \ |
({ \ |
if (!__builtin_constant_p(v) || \ |
((v) != NULL)) \ |
(p) = (v); \ |
}) |
//#define rcu_dereference_raw(p) ({ \ |
// typeof(p) _________p1 = ACCESS_ONCE(p); \ |
// (_________p1); \ |
// }) |
//#define rcu_assign_pointer(p, v) \ |
// ({ \ |
// if (!__builtin_constant_p(v) || \ |
// ((v) != NULL)) \ |
// (p) = (v); \ |
// }) |
unsigned int hweight16(unsigned int w); |
#define cpufreq_quick_get_max(x) GetCpuFreq() |
extern unsigned int tsc_khz; |
540,7 → 793,7 |
} |
} |
memcpy((void __force *)to, from, n); |
__builtin_memcpy((void __force *)to, from, n); |
return 0; |
} |
551,6 → 804,14 |
void kunmap(struct page *page); |
void kunmap_atomic(void *vaddr); |
typedef u64 async_cookie_t; |
#define iowrite32(v, addr) writel((v), (addr)) |
#define __init |
#define CONFIG_PAGE_OFFSET 0 |
#endif |
/drivers/include/linux/kobject.h |
---|
25,7 → 25,8 |
//#include <linux/kobject_ns.h> |
#include <linux/kernel.h> |
#include <linux/wait.h> |
//#include <linux/atomic.h> |
#include <linux/atomic.h> |
#include <linux/workqueue.h> |
#define UEVENT_HELPER_PATH_LEN 256 |
#define UEVENT_NUM_ENVP 32 /* number of env pointers */ |
/drivers/include/linux/kref.h |
---|
15,7 → 15,11 |
#ifndef _KREF_H_ |
#define _KREF_H_ |
#include <linux/types.h> |
#include <linux/bug.h> |
#include <linux/atomic.h> |
#include <linux/kernel.h> |
#include <linux/mutex.h> |
#include <linux/spinlock.h> |
struct kref { |
atomic_t refcount; |
/drivers/include/linux/linkage.h |
---|
0,0 → 1,112 |
#ifndef _LINUX_LINKAGE_H |
#define _LINUX_LINKAGE_H |
#include <linux/compiler.h> |
#include <linux/stringify.h> |
#include <linux/export.h> |
#include <asm/linkage.h> |
/* Some toolchains use other characters (e.g. '`') to mark new line in macro */ |
#ifndef ASM_NL |
#define ASM_NL ; |
#endif |
#ifdef __cplusplus |
#define CPP_ASMLINKAGE extern "C" |
#else |
#define CPP_ASMLINKAGE |
#endif |
#ifndef asmlinkage |
#define asmlinkage CPP_ASMLINKAGE |
#endif |
#ifndef cond_syscall |
#define cond_syscall(x) asm( \ |
".weak " VMLINUX_SYMBOL_STR(x) "\n\t" \ |
".set " VMLINUX_SYMBOL_STR(x) "," \ |
VMLINUX_SYMBOL_STR(sys_ni_syscall)) |
#endif |
#ifndef SYSCALL_ALIAS |
#define SYSCALL_ALIAS(alias, name) asm( \ |
".globl " VMLINUX_SYMBOL_STR(alias) "\n\t" \ |
".set " VMLINUX_SYMBOL_STR(alias) "," \ |
VMLINUX_SYMBOL_STR(name)) |
#endif |
#define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE) |
#define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE) |
/* |
* For assembly routines. |
* |
* Note when using these that you must specify the appropriate |
* alignment directives yourself |
*/ |
#define __PAGE_ALIGNED_DATA .section ".data..page_aligned", "aw" |
#define __PAGE_ALIGNED_BSS .section ".bss..page_aligned", "aw" |
/* |
* This is used by architectures to keep arguments on the stack |
* untouched by the compiler by keeping them live until the end. |
* The argument stack may be owned by the assembly-language |
* caller, not the callee, and gcc doesn't always understand |
* that. |
* |
* We have the return value, and a maximum of six arguments. |
* |
* This should always be followed by a "return ret" for the |
* protection to work (i.e. no more work that the compiler might |
* end up needing stack temporaries for). |
*/ |
/* Assembly files may be compiled with -traditional .. */ |
#ifndef __ASSEMBLY__ |
#ifndef asmlinkage_protect |
# define asmlinkage_protect(n, ret, args...) do { } while (0) |
#endif |
#endif |
#ifndef __ALIGN |
#define __ALIGN .align 4,0x90 |
#define __ALIGN_STR ".align 4,0x90" |
#endif |
#ifdef __ASSEMBLY__ |
#ifndef LINKER_SCRIPT |
#define ALIGN __ALIGN |
#define ALIGN_STR __ALIGN_STR |
#ifndef ENTRY |
#define ENTRY(name) \ |
.globl name ASM_NL \ |
ALIGN ASM_NL \ |
name: |
#endif |
#endif /* LINKER_SCRIPT */ |
#ifndef WEAK |
#define WEAK(name) \ |
.weak name ASM_NL \ |
name: |
#endif |
#ifndef END |
#define END(name) \ |
.size name, .-name |
#endif |
/* If symbol 'name' is treated as a subroutine (gets called, and returns) |
* then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of |
* static analysis tools such as stack depth analyzer. |
*/ |
#ifndef ENDPROC |
#define ENDPROC(name) \ |
.type name, @function ASM_NL \ |
END(name) |
#endif |
#endif |
#endif |
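For reference, how the markers pair up in a .S file; the routine name is hypothetical, and with the defaults above ENTRY() expands to ".globl name; .align 4,0x90; name:" while ENDPROC() adds the .type/.size records that stack-depth analyzers rely on:

    ENTRY(shim_memfill)
            rep stosb
            ret
    ENDPROC(shim_memfill)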
/drivers/include/linux/list.h |
---|
4,6 → 4,8 |
#include <linux/types.h> |
#include <linux/stddef.h> |
#include <linux/poison.h> |
#include <linux/const.h> |
#include <linux/kernel.h> |
/* |
* Simple doubly linked list implementation. |
344,7 → 346,7 |
* list_entry - get the struct for this entry |
* @ptr: the &struct list_head pointer. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
*/ |
#define list_entry(ptr, type, member) \ |
container_of(ptr, type, member) |
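Tying list_entry() to the iterators below, a hypothetical traversal (struct item and its 'link' member are made up):

    struct item {
            int value;
            struct list_head link;
    };

    static int sum_items(struct list_head *head)
    {
            struct item *it;
            int sum = 0;

            /* 'link' is the list_head member inside struct item */
            list_for_each_entry(it, head, link)
                    sum += it->value;
            return sum;
    }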
353,7 → 355,7 |
* list_first_entry - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Note, that list is expected to be not empty. |
*/ |
364,7 → 366,7 |
* list_last_entry - get the last element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Note, that list is expected to be not empty. |
*/ |
375,7 → 377,7 |
* list_first_entry_or_null - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Note that if the list is empty, it returns NULL. |
*/ |
385,7 → 387,7 |
/** |
* list_next_entry - get the next element in list |
* @pos: the type * to cursor |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
*/ |
#define list_next_entry(pos, member) \ |
list_entry((pos)->member.next, typeof(*(pos)), member) |
393,7 → 395,7 |
/** |
* list_prev_entry - get the prev element in list |
* @pos: the type * to cursor |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
*/ |
#define list_prev_entry(pos, member) \ |
list_entry((pos)->member.prev, typeof(*(pos)), member) |
439,7 → 441,7 |
* list_for_each_entry - iterate over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
*/ |
#define list_for_each_entry(pos, head, member) \ |
for (pos = list_first_entry(head, typeof(*pos), member); \ |
450,7 → 452,7 |
* list_for_each_entry_reverse - iterate backwards over list of given type. |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
*/ |
#define list_for_each_entry_reverse(pos, head, member) \ |
for (pos = list_last_entry(head, typeof(*pos), member); \ |
461,7 → 463,7 |
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() |
* @pos: the type * to use as a start point |
* @head: the head of the list |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Prepares a pos entry for use as a start point in list_for_each_entry_continue(). |
*/ |
472,7 → 474,7 |
* list_for_each_entry_continue - continue iteration over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Continue to iterate over list of given type, continuing after |
* the current position. |
486,7 → 488,7 |
* list_for_each_entry_continue_reverse - iterate backwards from the given point |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Start to iterate over list of given type backwards, continuing after |
* the current position. |
500,7 → 502,7 |
* list_for_each_entry_from - iterate over list of given type from the current point |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Iterate over list of given type, continuing from current position. |
*/ |
513,7 → 515,7 |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
*/ |
#define list_for_each_entry_safe(pos, n, head, member) \ |
for (pos = list_first_entry(head, typeof(*pos), member), \ |
526,7 → 528,7 |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Iterate over list of given type, continuing after current point, |
* safe against removal of list entry. |
542,7 → 544,7 |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Iterate over list of given type from current point, safe against |
* removal of list entry. |
557,7 → 559,7 |
* @pos: the type * to use as a loop cursor. |
* @n: another type * to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Iterate backwards over list of given type, safe against removal |
* of list entry. |
572,7 → 574,7 |
* list_safe_reset_next - reset a stale list_for_each_entry_safe loop |
* @pos: the loop cursor used in the list_for_each_entry_safe loop |
* @n: temporary storage used in list_for_each_entry_safe |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* list_safe_reset_next is not safe to use in general if the list may be |
* modified concurrently (e.g. the lock is dropped in the loop body). An |
/drivers/include/linux/lockdep.h |
---|
4,7 → 4,7 |
* Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> |
* |
* see Documentation/lockdep-design.txt for more details. |
* see Documentation/locking/lockdep-design.txt for more details. |
*/ |
#ifndef __LINUX_LOCKDEP_H |
#define __LINUX_LOCKDEP_H |
12,6 → 12,10 |
struct task_struct; |
struct lockdep_map; |
/* for sysctl */ |
extern int prove_locking; |
extern int lock_stat; |
#ifdef CONFIG_LOCKDEP |
#include <linux/linkage.h> |
51,6 → 55,8 |
struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; |
}; |
extern struct lock_class_key __lockdep_no_validate__; |
#define LOCKSTAT_POINTS 4 |
/* |
151,7 → 157,25 |
#endif |
}; |
static inline void lockdep_copy_map(struct lockdep_map *to, |
struct lockdep_map *from) |
{ |
int i; |
*to = *from; |
/* |
* Since the class cache can be modified concurrently we could observe |
* half pointers (64bit arch using 32bit copy insns). Therefore clear |
* the caches and take the performance hit. |
* |
* XXX it doesn't work well with lockdep_set_class_and_subclass(), since |
* that relies on cache abuse. |
*/ |
for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++) |
to->class_cache[i] = NULL; |
} |
/* |
* Every lock has a list of other locks that were taken after it. |
* We only grow the list, never remove from it: |
*/ |
338,6 → 362,10 |
WARN_ON(debug_locks && !lockdep_is_held(l)); \ |
} while (0) |
#define lockdep_assert_held_once(l) do { \ |
WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \ |
} while (0) |
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) |
#else /* !CONFIG_LOCKDEP */ |
388,6 → 416,7 |
#define lockdep_depth(tsk) (0) |
#define lockdep_assert_held(l) do { (void)(l); } while (0) |
#define lockdep_assert_held_once(l) do { (void)(l); } while (0) |
#define lockdep_recursing(tsk) (0) |
454,82 → 483,35 |
* on the per lock-class debug mode: |
*/ |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) |
# else |
# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# endif |
#define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) |
#define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i) |
#define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i) |
#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) |
# define spin_release(l, n, i) lock_release(l, n, i) |
#else |
# define spin_acquire(l, s, t, i) do { } while (0) |
# define spin_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i) |
# else |
# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i) |
# endif |
#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) |
# define rwlock_release(l, n, i) lock_release(l, n, i) |
#else |
# define rwlock_acquire(l, s, t, i) do { } while (0) |
# define rwlock_acquire_read(l, s, t, i) do { } while (0) |
# define rwlock_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) |
# else |
# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) |
# endif |
#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i) |
#define seqcount_release(l, n, i) lock_release(l, n, i) |
#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) |
# define mutex_release(l, n, i) lock_release(l, n, i) |
#else |
# define mutex_acquire(l, s, t, i) do { } while (0) |
# define mutex_acquire_nest(l, s, t, n, i) do { } while (0) |
# define mutex_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i) |
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i) |
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i) |
# else |
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i) |
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i) |
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i) |
# endif |
#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i) |
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i) |
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i) |
# define rwsem_release(l, n, i) lock_release(l, n, i) |
#else |
# define rwsem_acquire(l, s, t, i) do { } while (0) |
# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0) |
# define rwsem_acquire_read(l, s, t, i) do { } while (0) |
# define rwsem_release(l, n, i) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# ifdef CONFIG_PROVE_LOCKING |
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_) |
# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_) |
# else |
# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_) |
# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_) |
# endif |
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) |
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) |
#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_) |
# define lock_map_release(l) lock_release(l, 1, _THIS_IP_) |
#else |
# define lock_map_acquire(l) do { } while (0) |
# define lock_map_acquire_read(l) do { } while (0) |
# define lock_map_release(l) do { } while (0) |
#endif |
#ifdef CONFIG_PROVE_LOCKING |
# define might_lock(lock) \ |
/drivers/include/linux/mm.h |
---|
1,13 → 1,13 |
#ifndef _LINUX_MM_H |
#define _LINUX_MM_H |
#include <kernel.h> |
#include <linux/errno.h> |
#define VM_NORESERVE 0x00200000 |
#define nth_page(page,n) ((void*)(((page_to_phys(page)>>12)+(n))<<12)) |
#define page_to_pfn(page) (page_to_phys(page)>>12) |
#define __page_to_pfn(page) (page_to_phys(page)>>12) |
/* to align the pointer to the (next) page boundary */ |
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE) |
/drivers/include/linux/mmdebug.h |
---|
0,0 → 1,58 |
#ifndef LINUX_MM_DEBUG_H |
#define LINUX_MM_DEBUG_H 1 |
#include <linux/stringify.h> |
struct page; |
struct vm_area_struct; |
struct mm_struct; |
extern void dump_page(struct page *page, const char *reason); |
extern void dump_page_badflags(struct page *page, const char *reason, |
unsigned long badflags); |
void dump_vma(const struct vm_area_struct *vma); |
void dump_mm(const struct mm_struct *mm); |
#ifdef CONFIG_DEBUG_VM |
#define VM_BUG_ON(cond) BUG_ON(cond) |
#define VM_BUG_ON_PAGE(cond, page) \ |
do { \ |
if (unlikely(cond)) { \ |
dump_page(page, "VM_BUG_ON_PAGE(" __stringify(cond)")");\ |
BUG(); \ |
} \ |
} while (0) |
#define VM_BUG_ON_VMA(cond, vma) \ |
do { \ |
if (unlikely(cond)) { \ |
dump_vma(vma); \ |
BUG(); \ |
} \ |
} while (0) |
#define VM_BUG_ON_MM(cond, mm) \ |
do { \ |
if (unlikely(cond)) { \ |
dump_mm(mm); \ |
BUG(); \ |
} \ |
} while (0) |
#define VM_WARN_ON(cond) WARN_ON(cond) |
#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) |
#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) |
#else |
#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) |
#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) |
#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond) |
#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond) |
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) |
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) |
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) |
#endif |
#ifdef CONFIG_DEBUG_VIRTUAL |
#define VIRTUAL_BUG_ON(cond) BUG_ON(cond) |
#else |
#define VIRTUAL_BUG_ON(cond) do { } while (0) |
#endif |
#endif |
/drivers/include/linux/mod_devicetable.h |
---|
9,7 → 9,7 |
#ifdef __KERNEL__ |
#include <linux/types.h> |
#include <mutex.h> |
#include <linux/uuid.h> |
typedef unsigned long kernel_ulong_t; |
#endif |
69,7 → 69,7 |
* @bDeviceClass: Class of device; numbers are assigned |
* by the USB forum. Products may choose to implement classes, |
* or be vendor-specific. Device classes specify behavior of all |
* the interfaces on a devices. |
* the interfaces on a device. |
* @bDeviceSubClass: Subclass of device; associated with bDeviceClass. |
* @bDeviceProtocol: Protocol of device; associated with bDeviceClass. |
* @bInterfaceClass: Class of interface; numbers are assigned |
/drivers/include/linux/module.h |
---|
8,9 → 8,13 |
*/ |
#include <linux/list.h> |
#include <linux/compiler.h> |
#include <linux/cache.h> |
#include <linux/compiler.h> |
#include <linux/kernel.h> |
#include <linux/moduleparam.h> |
#include <linux/export.h> |
#include <linux/printk.h> |
#define MODULE_FIRMWARE(x) |
/drivers/include/linux/moduleparam.h |
---|
1,3 → 1,10 |
#ifndef _LINUX_MODULE_PARAMS_H |
#define _LINUX_MODULE_PARAMS_H |
/* (C) Copyright 2001, 2002 Rusty Russell IBM Corporation */ |
#include <linux/kernel.h> |
#define MODULE_PARM_DESC(_parm, desc) |
#define module_param_named(name, value, type, perm) |
#define module_param_named_unsafe(name, value, type, perm) |
#endif |
/drivers/include/linux/mutex.h |
---|
10,8 → 10,12 |
#ifndef __LINUX_MUTEX_H |
#define __LINUX_MUTEX_H |
#include <asm/current.h> |
#include <linux/list.h> |
#include <asm/atomic.h> |
#include <linux/linkage.h> |
#include <linux/lockdep.h> |
#include <asm/processor.h> |
/* |
* Simple, straightforward mutexes with strict semantics: |
/drivers/include/linux/pci.h |
---|
17,9 → 17,13 |
#define LINUX_PCI_H |
#include <linux/types.h> |
#include <list.h> |
#include <linux/list.h> |
#include <linux/compiler.h> |
#include <linux/errno.h> |
#include <linux/atomic.h> |
#include <linux/pci_regs.h> /* The pci register defines */ |
#include <ioport.h> |
#include <linux/ioport.h> |
#define PCI_CFG_SPACE_SIZE 256 |
311,6 → 315,19 |
PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2, |
}; |
/* These values come from the PCI Express Spec */ |
enum pcie_link_width { |
PCIE_LNK_WIDTH_RESRV = 0x00, |
PCIE_LNK_X1 = 0x01, |
PCIE_LNK_X2 = 0x02, |
PCIE_LNK_X4 = 0x04, |
PCIE_LNK_X8 = 0x08, |
PCIE_LNK_X12 = 0x0C, |
PCIE_LNK_X16 = 0x10, |
PCIE_LNK_X32 = 0x20, |
PCIE_LNK_WIDTH_UNKNOWN = 0xFF, |
}; |
/* Based on the PCI Hotplug Spec, but some values are made up by us */ |
enum pci_bus_speed { |
PCI_SPEED_33MHz = 0x00, |
338,6 → 355,23 |
PCI_SPEED_UNKNOWN = 0xff, |
}; |
struct pci_cap_saved_data { |
u16 cap_nr; |
bool cap_extended; |
unsigned int size; |
u32 data[0]; |
}; |
struct pci_cap_saved_state { |
struct hlist_node next; |
struct pci_cap_saved_data cap; |
}; |
struct pcie_link_state; |
struct pci_vpd; |
struct pci_sriov; |
struct pci_ats; |
/* |
* The pci_dev structure is used to describe PCI devices. |
*/ |
349,7 → 383,7 |
void *sysdata; /* hook for sys-specific extension */ |
// struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ |
struct pci_slot *slot; /* Physical slot this device is in */ |
u32_t busnr; |
u32 busnr; |
unsigned int devfn; /* encoded device & function index */ |
unsigned short vendor; |
unsigned short device; |
365,7 → 399,7 |
u16 pcie_flags_reg; /* cached PCI-E Capabilities Register */ |
// struct pci_driver *driver; /* which driver has allocated this device */ |
uint64_t dma_mask; /* Mask of the bits of bus address this |
u64 dma_mask; /* Mask of the bits of bus address this |
device implements. Normally this is |
0xffffffff. You only need to change |
this if your device has broken DMA |
548,7 → 582,7 |
case PCIBIOS_FUNC_NOT_SUPPORTED: |
return -ENOENT; |
case PCIBIOS_BAD_VENDOR_ID: |
return -EINVAL; |
return -ENOTTY; |
case PCIBIOS_DEVICE_NOT_FOUND: |
return -ENODEV; |
case PCIBIOS_BAD_REGISTER_NUMBER: |
559,7 → 593,7 |
return -ENOSPC; |
} |
return -ENOTTY; |
return -ERANGE; |
} |
/* Low-level architecture-dependent routines */ |
569,7 → 603,20 |
int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); |
}; |
/* |
* ACPI needs to be able to access PCI config space before we've done a |
* PCI bus scan and created pci_bus structures. |
*/ |
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, |
int reg, int len, u32 *val); |
int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, |
int reg, int len, u32 val); |
struct pci_bus_region { |
dma_addr_t start; |
dma_addr_t end; |
}; |
enum pci_bar_type { |
pci_bar_unknown, /* Standard PCI BAR probe */ |
pci_bar_io, /* An io port BAR */ |
/drivers/include/linux/percpu-defs.h |
---|
0,0 → 1,516 |
/* |
* linux/percpu-defs.h - basic definitions for percpu areas |
* |
* DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER. |
* |
* This file is separate from linux/percpu.h to avoid cyclic inclusion |
* dependency from arch header files. Only to be included from |
* asm/percpu.h. |
* |
* This file includes macros necessary to declare percpu sections and |
* variables, and definitions of percpu accessors and operations. It |
* should provide enough percpu features to arch header files even when |
* they can only include asm/percpu.h to avoid cyclic inclusion dependency. |
*/ |
#ifndef _LINUX_PERCPU_DEFS_H |
#define _LINUX_PERCPU_DEFS_H |
#ifdef CONFIG_SMP |
#ifdef MODULE |
#define PER_CPU_SHARED_ALIGNED_SECTION "" |
#define PER_CPU_ALIGNED_SECTION "" |
#else |
#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned" |
#define PER_CPU_ALIGNED_SECTION "..shared_aligned" |
#endif |
#define PER_CPU_FIRST_SECTION "..first" |
#else |
#define PER_CPU_SHARED_ALIGNED_SECTION "" |
#define PER_CPU_ALIGNED_SECTION "..shared_aligned" |
#define PER_CPU_FIRST_SECTION "" |
#endif |
/* |
* Base implementations of per-CPU variable declarations and definitions, where |
* the section in which the variable is to be placed is provided by the |
* 'sec' argument. This may be used to affect the parameters governing the |
* variable's storage. |
* |
* NOTE! The sections for the DECLARE and for the DEFINE must match, lest |
* linkage errors occur due to the compiler generating the wrong code to access |
* that section. |
*/ |
#define __PCPU_ATTRS(sec) \ |
__percpu __attribute__((section(PER_CPU_BASE_SECTION sec))) \ |
PER_CPU_ATTRIBUTES |
#define __PCPU_DUMMY_ATTRS \ |
__attribute__((section(".discard"), unused)) |
/* |
* s390 and alpha modules require percpu variables to be defined as |
* weak to force the compiler to generate GOT based external |
* references for them. This is necessary because percpu sections |
* will be located outside of the usually addressable area. |
* |
* This definition puts the following two extra restrictions when |
* defining percpu variables. |
* |
* 1. The symbol must be globally unique, even the static ones. |
* 2. Static percpu variables cannot be defined inside a function. |
* |
* Archs which need weak percpu definitions should define |
* ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary. |
* |
* To ensure that the generic code observes the above two |
* restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak |
* definition is used for all cases. |
*/ |
#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU) |
/* |
* __pcpu_scope_* dummy variable is used to enforce scope. It |
* receives the static modifier when it's used in front of |
* DEFINE_PER_CPU() and will trigger build failure if |
* DECLARE_PER_CPU() is used for the same variable. |
* |
* __pcpu_unique_* dummy variable is used to enforce symbol uniqueness |
* such that hidden weak symbol collision, which will cause unrelated |
* variables to share the same address, can be detected during build. |
*/ |
#define DECLARE_PER_CPU_SECTION(type, name, sec) \ |
extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ |
extern __PCPU_ATTRS(sec) __typeof__(type) name |
#define DEFINE_PER_CPU_SECTION(type, name, sec) \ |
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ |
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ |
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ |
extern __PCPU_ATTRS(sec) __typeof__(type) name; \ |
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ |
__typeof__(type) name |
#else |
/* |
* Normal declaration and definition macros. |
*/ |
#define DECLARE_PER_CPU_SECTION(type, name, sec) \ |
extern __PCPU_ATTRS(sec) __typeof__(type) name |
#define DEFINE_PER_CPU_SECTION(type, name, sec) \ |
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \ |
__typeof__(type) name |
#endif |
/* |
* Variant on the per-CPU variable declaration/definition theme used for |
* ordinary per-CPU variables. |
*/ |
#define DECLARE_PER_CPU(type, name) \ |
DECLARE_PER_CPU_SECTION(type, name, "") |
#define DEFINE_PER_CPU(type, name) \ |
DEFINE_PER_CPU_SECTION(type, name, "") |
/* |
* Declaration/definition used for per-CPU variables that must come first in |
* the set of variables. |
*/ |
#define DECLARE_PER_CPU_FIRST(type, name) \ |
DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) |
#define DEFINE_PER_CPU_FIRST(type, name) \ |
DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) |
/* |
* Declaration/definition used for per-CPU variables that must be cacheline |
* aligned under SMP conditions so that, whilst a particular instance of the |
* data corresponds to a particular CPU, inefficiencies due to direct access by |
* other CPUs are reduced by preventing the data from unnecessarily spanning |
* cachelines. |
* |
* An example of this would be statistical data, where each CPU's set of data |
* is updated by that CPU alone, but the data from across all CPUs is collated |
* by a CPU processing a read from a proc file. |
*/ |
#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name) \ |
DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ |
____cacheline_aligned_in_smp |
#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ |
DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ |
____cacheline_aligned_in_smp |
#define DECLARE_PER_CPU_ALIGNED(type, name) \ |
DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \ |
____cacheline_aligned |
#define DEFINE_PER_CPU_ALIGNED(type, name) \ |
DEFINE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION) \ |
____cacheline_aligned |
/* |
* Declaration/definition used for per-CPU variables that must be page aligned. |
*/ |
#define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \ |
DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \ |
__aligned(PAGE_SIZE) |
#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ |
DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \ |
__aligned(PAGE_SIZE) |
/* |
* Declaration/definition used for per-CPU variables that must be read mostly. |
*/ |
#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \ |
DECLARE_PER_CPU_SECTION(type, name, "..read_mostly") |
#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \ |
DEFINE_PER_CPU_SECTION(type, name, "..read_mostly") |
/* |
* Intermodule exports for per-CPU variables. sparse forgets about |
* address space across EXPORT_SYMBOL(), so EXPORT_SYMBOL() is changed to |
* a no-op if __CHECKER__ is defined. |
*/ |
#ifndef __CHECKER__ |
#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var) |
#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var) |
#else |
#define EXPORT_PER_CPU_SYMBOL(var) |
#define EXPORT_PER_CPU_SYMBOL_GPL(var) |
#endif |
/* |
* Accessors and operations. |
*/ |
#ifndef __ASSEMBLY__ |
/* |
* __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating |
* @ptr and is invoked once before a percpu area is accessed by all |
* accessors and operations. This is performed in the generic part of |
* percpu and arch overrides don't need to worry about it; however, if an |
* arch wants to implement an arch-specific percpu accessor or operation, |
* it may use __verify_pcpu_ptr() to verify the parameters. |
* |
* + 0 is required in order to convert the pointer type from a |
* potential array type to a pointer to a single item of the array. |
*/ |
#define __verify_pcpu_ptr(ptr) \ |
do { \ |
const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ |
(void)__vpp_verify; \ |
} while (0) |
#ifdef CONFIG_SMP |
/* |
* Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE() |
* to prevent the compiler from making incorrect assumptions about the |
* pointer value. The weird cast keeps both GCC and sparse happy. |
*/ |
#define SHIFT_PERCPU_PTR(__p, __offset) \ |
RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)) |
#define per_cpu_ptr(ptr, cpu) \ |
({ \ |
__verify_pcpu_ptr(ptr); \ |
SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))); \ |
}) |
#define raw_cpu_ptr(ptr) \ |
({ \ |
__verify_pcpu_ptr(ptr); \ |
arch_raw_cpu_ptr(ptr); \ |
}) |
#ifdef CONFIG_DEBUG_PREEMPT |
#define this_cpu_ptr(ptr) \ |
({ \ |
__verify_pcpu_ptr(ptr); \ |
SHIFT_PERCPU_PTR(ptr, my_cpu_offset); \ |
}) |
#else |
#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) |
#endif |
#else /* CONFIG_SMP */ |
#define VERIFY_PERCPU_PTR(__p) \ |
({ \ |
__verify_pcpu_ptr(__p); \ |
(typeof(*(__p)) __kernel __force *)(__p); \ |
}) |
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) |
#define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) |
#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) |
#endif /* CONFIG_SMP */ |
#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) |
/* |
* Must be an lvalue. Since @var must be a simple identifier, |
* we force a syntax error here if it isn't. |
*/ |
#define get_cpu_var(var) \ |
(*({ \ |
preempt_disable(); \ |
this_cpu_ptr(&var); \ |
})) |
/* |
* The weird & is necessary because sparse considers (void)(var) to be |
* a direct dereference of percpu variable (var). |
*/ |
#define put_cpu_var(var) \ |
do { \ |
(void)&(var); \ |
preempt_enable(); \ |
} while (0) |
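The pairing in practice, reusing the hypothetical irq_count from the earlier sketch:

    static void note_irq(void)
    {
            get_cpu_var(irq_count)++;   /* disables preemption, yields
                                           this CPU's copy as an lvalue */
            put_cpu_var(irq_count);     /* re-enables preemption */
    }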
#define get_cpu_ptr(var) \ |
({ \ |
preempt_disable(); \ |
this_cpu_ptr(var); \ |
}) |
#define put_cpu_ptr(var) \ |
do { \ |
(void)(var); \ |
preempt_enable(); \ |
} while (0) |
/* |
* Branching function to split up a function into a set of functions that |
* are called for different scalar sizes of the objects handled. |
*/ |
extern void __bad_size_call_parameter(void); |
#ifdef CONFIG_DEBUG_PREEMPT |
extern void __this_cpu_preempt_check(const char *op); |
#else |
static inline void __this_cpu_preempt_check(const char *op) { } |
#endif |
#define __pcpu_size_call_return(stem, variable) \ |
({ \ |
typeof(variable) pscr_ret__; \ |
__verify_pcpu_ptr(&(variable)); \ |
switch(sizeof(variable)) { \ |
case 1: pscr_ret__ = stem##1(variable); break; \ |
case 2: pscr_ret__ = stem##2(variable); break; \ |
case 4: pscr_ret__ = stem##4(variable); break; \ |
case 8: pscr_ret__ = stem##8(variable); break; \ |
default: \ |
__bad_size_call_parameter(); break; \ |
} \ |
pscr_ret__; \ |
}) |
#define __pcpu_size_call_return2(stem, variable, ...) \ |
({ \ |
typeof(variable) pscr2_ret__; \ |
__verify_pcpu_ptr(&(variable)); \ |
switch(sizeof(variable)) { \ |
case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ |
case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ |
case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ |
case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ |
default: \ |
__bad_size_call_parameter(); break; \ |
} \ |
pscr2_ret__; \ |
}) |
/* |
* Special handling for cmpxchg_double. cmpxchg_double is passed two |
* percpu variables. The first has to be aligned to a double word |
* boundary and the second has to follow directly thereafter. |
* We enforce this on all architectures even if they don't support |
* a double cmpxchg instruction, since it's a cheap requirement, and it |
* avoids breaking the requirement for architectures with the instruction. |
*/ |
#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ |
({ \ |
bool pdcrb_ret__; \ |
__verify_pcpu_ptr(&(pcp1)); \ |
BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ |
VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1))); \ |
VM_BUG_ON((unsigned long)(&(pcp2)) != \ |
(unsigned long)(&(pcp1)) + sizeof(pcp1)); \ |
switch(sizeof(pcp1)) { \ |
case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ |
case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ |
case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \ |
case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \ |
default: \ |
__bad_size_call_parameter(); break; \ |
} \ |
pdcrb_ret__; \ |
}) |
#define __pcpu_size_call(stem, variable, ...) \ |
do { \ |
__verify_pcpu_ptr(&(variable)); \ |
switch(sizeof(variable)) { \ |
case 1: stem##1(variable, __VA_ARGS__);break; \ |
case 2: stem##2(variable, __VA_ARGS__);break; \ |
case 4: stem##4(variable, __VA_ARGS__);break; \ |
case 8: stem##8(variable, __VA_ARGS__);break; \ |
default: \ |
__bad_size_call_parameter();break; \ |
} \ |
} while (0) |
/* |
* this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com> |
* |
* Optimized manipulation for memory allocated through the per cpu |
* allocator or for addresses of per cpu variables. |
* |
* These operations guarantee exclusivity of access for other operations |
* on the *same* processor. The assumption is that per cpu data is only |
* accessed by a single processor instance (the current one). |
* |
* The arch code can provide an optimized implementation by defining macros |
* for certain scalar sizes. E.g. provide this_cpu_add_2() to provide per |
* cpu atomic operations for 2 byte sized RMW actions. If arch code does |
* not provide operations for a scalar size then the fallback in the |
* generic code will be used. |
* |
* cmpxchg_double replaces two adjacent scalars at once. The first two |
* parameters are per cpu variables which have to be of the same size. A |
* truth value is returned to indicate success or failure (since a double |
* register result is difficult to handle). There is very limited hardware |
* support for these operations, so only certain sizes may work. |
*/ |
/* |
* Operations for contexts where we do not want to do any checks for |
* preemptions. Unless strictly necessary, always use [__]this_cpu_*() |
* instead. |
* |
* If there is no other protection through preempt disable and/or disabling |
* interrupts then one of these RMW operations can show unexpected behavior |
* because the execution thread was rescheduled on another processor or an |
* interrupt occurred and the same percpu variable was modified from the |
* interrupt context. |
*/ |
#define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, pcp) |
#define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, pcp, val) |
#define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, pcp, val) |
#define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, pcp, val) |
#define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, pcp, val) |
#define raw_cpu_add_return(pcp, val) __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) |
#define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval) |
#define raw_cpu_cmpxchg(pcp, oval, nval) \ |
__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) |
#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) |
#define raw_cpu_sub(pcp, val) raw_cpu_add(pcp, -(val)) |
#define raw_cpu_inc(pcp) raw_cpu_add(pcp, 1) |
#define raw_cpu_dec(pcp) raw_cpu_sub(pcp, 1) |
#define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) |
#define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1) |
#define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1) |
/* |
* Operations for contexts that are safe from preemption/interrupts. These |
* operations verify that preemption is disabled. |
*/ |
#define __this_cpu_read(pcp) \ |
({ \ |
__this_cpu_preempt_check("read"); \ |
raw_cpu_read(pcp); \ |
}) |
#define __this_cpu_write(pcp, val) \ |
({ \ |
__this_cpu_preempt_check("write"); \ |
raw_cpu_write(pcp, val); \ |
}) |
#define __this_cpu_add(pcp, val) \ |
({ \ |
__this_cpu_preempt_check("add"); \ |
raw_cpu_add(pcp, val); \ |
}) |
#define __this_cpu_and(pcp, val) \ |
({ \ |
__this_cpu_preempt_check("and"); \ |
raw_cpu_and(pcp, val); \ |
}) |
#define __this_cpu_or(pcp, val) \ |
({ \ |
__this_cpu_preempt_check("or"); \ |
raw_cpu_or(pcp, val); \ |
}) |
#define __this_cpu_add_return(pcp, val) \ |
({ \ |
__this_cpu_preempt_check("add_return"); \ |
raw_cpu_add_return(pcp, val); \ |
}) |
#define __this_cpu_xchg(pcp, nval) \ |
({ \ |
__this_cpu_preempt_check("xchg"); \ |
raw_cpu_xchg(pcp, nval); \ |
}) |
#define __this_cpu_cmpxchg(pcp, oval, nval) \ |
({ \ |
__this_cpu_preempt_check("cmpxchg"); \ |
raw_cpu_cmpxchg(pcp, oval, nval); \ |
}) |
#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
({ __this_cpu_preempt_check("cmpxchg_double"); \ |
raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \ |
}) |
#define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val)) |
#define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1) |
#define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1) |
#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) |
#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) |
#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) |
/* |
* Operations with implied preemption protection. These operations can be |
* used without worrying about preemption. Note that interrupts may still |
* occur while an operation is in progress and if the interrupt modifies |
* the variable too then RMW actions may not be reliable. |
*/ |
#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp) |
#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val) |
#define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, pcp, val) |
#define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, pcp, val) |
#define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, pcp, val) |
#define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) |
#define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval) |
#define this_cpu_cmpxchg(pcp, oval, nval) \ |
__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) |
#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ |
__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) |
#define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val)) |
#define this_cpu_inc(pcp) this_cpu_add(pcp, 1) |
#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1) |
#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) |
#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) |
#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) |
#endif /* __ASSEMBLY__ */ |
#endif /* _LINUX_PERCPU_DEFS_H */ |
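How the two families divide the work, again with the hypothetical irq_count (a sketch, not verified against this shim):

    static void tally(void)
    {
            /* Full protection: safe from preemptible context. */
            this_cpu_inc(irq_count);
    }

    static void tally_from_irq(void)
    {
            /* Caller is already non-preemptible, so the cheaper variant
             * suffices; CONFIG_DEBUG_PREEMPT would verify that claim. */
            __this_cpu_inc(irq_count);
    }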
/drivers/include/linux/personality.h |
---|
0,0 → 1,54 |
#ifndef _LINUX_PERSONALITY_H |
#define _LINUX_PERSONALITY_H |
#include <uapi/linux/personality.h> |
/* |
* Handling of different ABIs (personalities). |
*/ |
struct exec_domain; |
struct pt_regs; |
extern int register_exec_domain(struct exec_domain *); |
extern int unregister_exec_domain(struct exec_domain *); |
extern int __set_personality(unsigned int); |
/* |
* Description of an execution domain. |
* |
* The first two members are referenced from assembly source |
* and should stay where they are unless explicitly needed. |
*/ |
typedef void (*handler_t)(int, struct pt_regs *); |
struct exec_domain { |
const char *name; /* name of the execdomain */ |
handler_t handler; /* handler for syscalls */ |
unsigned char pers_low; /* lowest personality */ |
unsigned char pers_high; /* highest personality */ |
unsigned long *signal_map; /* signal mapping */ |
unsigned long *signal_invmap; /* reverse signal mapping */ |
struct map_segment *err_map; /* error mapping */ |
struct map_segment *socktype_map; /* socket type mapping */ |
struct map_segment *sockopt_map; /* socket option mapping */ |
struct map_segment *af_map; /* address family mapping */ |
struct module *module; /* module context of the ed. */ |
struct exec_domain *next; /* linked list (internal) */ |
}; |
/* |
* Return the base personality without flags. |
*/ |
#define personality(pers) (pers & PER_MASK) |
/* |
* Change personality of the currently running process. |
*/ |
#define set_personality(pers) \ |
((current->personality == (pers)) ? 0 : __set_personality(pers)) |
#endif /* _LINUX_PERSONALITY_H */ |
/drivers/include/linux/printk.h |
---|
0,0 → 1,264 |
#ifndef __KERNEL_PRINTK__ |
#define __KERNEL_PRINTK__ |
#include <stdarg.h> |
#include <linux/linkage.h> |
#include <linux/cache.h> |
extern const char linux_banner[]; |
extern const char linux_proc_banner[]; |
extern char *log_buf_addr_get(void); |
extern u32 log_buf_len_get(void); |
/* printk's without a loglevel use this.. */ |
#define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT |
/* We show everything that is MORE important than this.. */ |
#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */ |
#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */ |
#define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */ |
#define CONSOLE_LOGLEVEL_DEFAULT 7 /* anything MORE serious than KERN_DEBUG */ |
#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */ |
#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */ |
struct va_format { |
const char *fmt; |
va_list *va; |
}; |
/* |
* FW_BUG |
* Add this to a message where you are sure the firmware is buggy or behaves |
* really stupidly or out of spec. Be aware that the responsible BIOS developer |
* should be able to fix this issue or at least get a concrete idea of the |
* problem by reading your message, without needing to look at the kernel |
* code. |
* |
* Use it for definite and high priority BIOS bugs. |
* |
* FW_WARN |
* Use it for less clear-cut cases (e.g. could the kernel have messed things up already?) |
* and medium priority BIOS bugs. |
* |
* FW_INFO |
* Use this one if you want to tell the user or vendor about something |
* suspicious, but generally harmless related to the firmware. |
* |
* Use it for information or very low priority BIOS bugs. |
*/ |
#define FW_BUG "[Firmware Bug]: " |
#define FW_WARN "[Firmware Warn]: " |
#define FW_INFO "[Firmware Info]: " |
/* |
* HW_ERR |
* Add this to a message for hardware errors, so that the user can report |
* it to the hardware vendor instead of LKML or the software vendor. |
*/ |
#define HW_ERR "[Hardware Error]: " |
/* |
* DEPRECATED |
* Add this to a message whenever you want to warn user space about the use |
* of a deprecated aspect of an API so they can stop using it. |
*/ |
#define DEPRECATED "[Deprecated]: " |
static inline __printf(1, 2) |
int no_printk(const char *fmt, ...) |
{ |
return 0; |
} |
__printf(1, 2) int dbgprintf(const char *fmt, ...); |
#define printk(fmt, arg...) dbgprintf(fmt , ##arg) |
#ifndef pr_fmt |
#define pr_fmt(fmt) fmt |
#endif |
#define pr_debug(fmt, ...) \ |
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
/* |
* These can be used to print at the various log levels. |
* All of these will print unconditionally, although note that pr_debug() |
* and other debug macros are compiled out unless either DEBUG is defined |
* or CONFIG_DYNAMIC_DEBUG is set. |
*/ |
#define pr_emerg(fmt, ...) \ |
printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_alert(fmt, ...) \ |
printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_crit(fmt, ...) \ |
printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_err(fmt, ...) \ |
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_warning(fmt, ...) \ |
printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_warn pr_warning |
#define pr_notice(fmt, ...) \ |
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_info(fmt, ...) \ |
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_cont(fmt, ...) \ |
printk(KERN_CONT fmt, ##__VA_ARGS__) |
/* pr_devel() should produce zero code unless DEBUG is defined */ |
#ifdef DEBUG |
#define pr_devel(fmt, ...) \ |
printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#else |
#define pr_devel(fmt, ...) \ |
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#endif |
/* |
* Print a one-time message (analogous to WARN_ONCE() et al): |
*/ |
#ifdef CONFIG_PRINTK |
#define printk_once(fmt, ...) \ |
({ \ |
static bool __print_once __read_mostly; \ |
\ |
if (!__print_once) { \ |
__print_once = true; \ |
printk(fmt, ##__VA_ARGS__); \ |
} \ |
}) |
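Typical use, with a hypothetical message:

    static void check_fw(unsigned int ver)
    {
            if (ver < 0x0200)
                    printk_once(KERN_INFO "old firmware %x, applying quirk\n",
                                ver);   /* printed on the first call only */
    }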
#define printk_deferred_once(fmt, ...) \ |
({ \ |
static bool __print_once __read_mostly; \ |
\ |
if (!__print_once) { \ |
__print_once = true; \ |
printk_deferred(fmt, ##__VA_ARGS__); \ |
} \ |
}) |
#else |
#define printk_once(fmt, ...) \ |
no_printk(fmt, ##__VA_ARGS__) |
#define printk_deferred_once(fmt, ...) \ |
no_printk(fmt, ##__VA_ARGS__) |
#endif |
#define pr_emerg_once(fmt, ...) \ |
printk_once(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_alert_once(fmt, ...) \ |
printk_once(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_crit_once(fmt, ...) \ |
printk_once(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_err_once(fmt, ...) \ |
printk_once(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_warn_once(fmt, ...) \ |
printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_notice_once(fmt, ...) \ |
printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_info_once(fmt, ...) \ |
printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_cont_once(fmt, ...) \ |
printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__) |
#if defined(DEBUG) |
#define pr_devel_once(fmt, ...) \ |
printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#else |
#define pr_devel_once(fmt, ...) \ |
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#endif |
/* If you are writing a driver, please use dev_dbg instead */ |
#if defined(DEBUG) |
#define pr_debug_once(fmt, ...) \ |
printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#else |
#define pr_debug_once(fmt, ...) \ |
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#endif |
/* |
* ratelimited messages with local ratelimit_state, |
* no local ratelimit_state used in the !PRINTK case |
*/ |
#ifdef CONFIG_PRINTK |
#define printk_ratelimited(fmt, ...) \ |
({ \ |
static DEFINE_RATELIMIT_STATE(_rs, \ |
DEFAULT_RATELIMIT_INTERVAL, \ |
DEFAULT_RATELIMIT_BURST); \ |
\ |
if (__ratelimit(&_rs)) \ |
printk(fmt, ##__VA_ARGS__); \ |
}) |
#else |
#define printk_ratelimited(fmt, ...) \ |
no_printk(fmt, ##__VA_ARGS__) |
#endif |
#define pr_emerg_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_alert_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_crit_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_err_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_warn_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_notice_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) |
#define pr_info_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) |
/* no pr_cont_ratelimited, don't do that... */ |
#if defined(DEBUG) |
#define pr_devel_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#else |
#define pr_devel_ratelimited(fmt, ...) \ |
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#endif |
/* If you are writing a driver, please use dev_dbg instead */ |
#if defined(CONFIG_DYNAMIC_DEBUG) |
/* descriptor check is first to prevent flooding with "callbacks suppressed" */ |
#define pr_debug_ratelimited(fmt, ...) \ |
do { \ |
static DEFINE_RATELIMIT_STATE(_rs, \ |
DEFAULT_RATELIMIT_INTERVAL, \ |
DEFAULT_RATELIMIT_BURST); \ |
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ |
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ |
__ratelimit(&_rs)) \ |
__dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \ |
} while (0) |
#elif defined(DEBUG) |
#define pr_debug_ratelimited(fmt, ...) \ |
printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#else |
#define pr_debug_ratelimited(fmt, ...) \ |
no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
#endif |
extern const struct file_operations kmsg_fops; |
enum { |
DUMP_PREFIX_NONE, |
DUMP_PREFIX_ADDRESS, |
DUMP_PREFIX_OFFSET |
}; |
extern void hex_dump_to_buffer(const void *buf, size_t len, |
int rowsize, int groupsize, |
char *linebuf, size_t linebuflen, bool ascii); |
extern void print_hex_dump(const char *level, const char *prefix_str, |
int prefix_type, int rowsize, int groupsize, |
const void *buf, size_t len, bool ascii); |
extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, |
const void *buf, size_t len); |
#endif |
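A usage sketch for the dump helpers; the buffer and prefix are hypothetical:

    static void dump_frame(const u8 *buf, size_t len)
    {
            /* 16 bytes per row, ungrouped, offset prefix, ASCII column on */
            print_hex_dump(KERN_DEBUG, "frame: ", DUMP_PREFIX_OFFSET,
                           16, 1, buf, len, true);
    }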
/drivers/include/linux/range.h |
---|
0,0 → 1,30 |
#ifndef _LINUX_RANGE_H |
#define _LINUX_RANGE_H |
struct range { |
u64 start; |
u64 end; |
}; |
int add_range(struct range *range, int az, int nr_range, |
u64 start, u64 end); |
int add_range_with_merge(struct range *range, int az, int nr_range, |
u64 start, u64 end); |
void subtract_range(struct range *range, int az, u64 start, u64 end); |
int clean_sort_range(struct range *range, int az); |
void sort_range(struct range *range, int nr_range); |
#define MAX_RESOURCE ((resource_size_t)~0) |
static inline resource_size_t cap_resource(u64 val) |
{ |
if (val > MAX_RESOURCE) |
return MAX_RESOURCE; |
return val; |
} |
#endif |
/drivers/include/linux/rbtree_augmented.h |
---|
43,6 → 43,16 |
extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, |
void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); |
/* |
* Fixup the rbtree and update the augmented information when rebalancing. |
* |
* On insertion, the user must update the augmented information on the path |
* leading to the inserted node, then call rb_link_node() as usual and |
 * rb_insert_augmented() instead of the usual rb_insert_color() call. |
 * If rb_insert_augmented() rebalances the rbtree, it will callback into |
* a user provided function to update the augmented information on the |
* affected subtrees. |
*/ |
static inline void |
rb_insert_augmented(struct rb_node *node, struct rb_root *root, |
const struct rb_augment_callbacks *augment) |
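
The comment above prescribes the insertion protocol: fix up the augmented data along the search path, link the node, then call rb_insert_augmented(). A minimal sketch under stated assumptions follows; `struct my_node` and its `subtree_max` field are hypothetical, and in real code the callback table would normally be generated with RB_DECLARE_CALLBACKS() from this header.

```c
#include <linux/rbtree_augmented.h>
#include <linux/types.h>

struct my_node {
	struct rb_node rb;
	u64 key;
	u64 subtree_max;	/* augmented data: max key in this subtree */
};

static void my_insert(struct rb_root *root, struct my_node *new,
		      const struct rb_augment_callbacks *cb)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct my_node *n = rb_entry(*link, struct my_node, rb);

		parent = *link;
		/* Maintain the augmented data on the path to the new node. */
		if (n->subtree_max < new->key)
			n->subtree_max = new->key;
		link = new->key < n->key ? &n->rb.rb_left : &n->rb.rb_right;
	}
	new->subtree_max = new->key;
	rb_link_node(&new->rb, parent, link);
	/* Rebalance; rotations call back into cb to fix moved subtrees. */
	rb_insert_augmented(&new->rb, root, cb);
}
```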
/drivers/include/linux/rculist.h |
---|
7,7 → 7,7 |
* RCU-protected list version |
*/ |
#include <linux/list.h> |
//#include <linux/rcupdate.h> |
#include <linux/rcupdate.h> |
/* |
* Why is there no list_empty_rcu()? Because list_empty() serves this |
19,6 → 19,21 |
*/ |
/* |
* INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers |
* @list: list to be initialized |
* |
* You should instead use INIT_LIST_HEAD() for normal initialization and |
* cleanup tasks, when readers have no access to the list being initialized. |
* However, if the list being initialized is visible to readers, you |
* need to keep the compiler from being too mischievous. |
*/ |
static inline void INIT_LIST_HEAD_RCU(struct list_head *list) |
{ |
ACCESS_ONCE(list->next) = list; |
ACCESS_ONCE(list->prev) = list; |
} |
/* |
* return the ->next pointer of a list_head in an rcu safe |
* way, we must not access it directly |
*/ |
197,7 → 212,7 |
* instead of INIT_LIST_HEAD(). |
*/ |
INIT_LIST_HEAD(list); |
INIT_LIST_HEAD_RCU(list); |
/* |
* At this point, the list body still points to the source list. |
226,7 → 241,7 |
* list_entry_rcu - get the struct for this entry |
* @ptr: the &struct list_head pointer. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* This primitive may safely run concurrently with the _rcu list-mutation |
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
263,7 → 278,7 |
* list_first_or_null_rcu - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Note that if the list is empty, it returns NULL. |
* |
281,7 → 296,7 |
* list_for_each_entry_rcu - iterate over rcu list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* This list-traversal primitive may safely run concurrently with |
* the _rcu list-mutation primitives such as list_add_rcu() |
296,7 → 311,7 |
* list_for_each_entry_continue_rcu - continue iteration over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* @member: the name of the list_head within the struct. |
* |
* Continue to iterate over list of given type, continuing after |
* the current position. |
527,6 → 542,15 |
pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ |
typeof(*(pos)), member)) |
/** |
* hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point |
* @pos: the type * to use as a loop cursor. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry_from_rcu(pos, member) \ |
for (; pos; \ |
pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\ |
typeof(*(pos)), member)) |
#endif /* __KERNEL__ */ |
#endif |
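
To tie the traversal macros above to the update-side primitives they pair with, here is a minimal lookup/removal sketch; `struct my_entry`, `my_list` and `my_lock` are hypothetical.

```c
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_entry {
	struct list_head node;
	int key;
	struct rcu_head rcu;
};

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);	/* serializes list updaters */

/* Reader: safe against concurrent list_add_rcu()/list_del_rcu(). */
static bool my_lookup(int key)
{
	struct my_entry *e;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &my_list, node) {
		if (e->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

static void my_free_cb(struct rcu_head *head)
{
	kfree(container_of(head, struct my_entry, rcu));
}

/* Updater: unlink under the lock, free one grace period later. */
static void my_del(struct my_entry *e)
{
	spin_lock(&my_lock);
	list_del_rcu(&e->node);
	spin_unlock(&my_lock);
	call_rcu(&e->rcu, my_free_cb);
}
```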
/drivers/include/linux/rcupdate.h |
---|
0,0 → 1,1158 |
/* |
* Read-Copy Update mechanism for mutual exclusion |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, you can access it online at |
* http://www.gnu.org/licenses/gpl-2.0.html. |
* |
* Copyright IBM Corporation, 2001 |
* |
* Author: Dipankar Sarma <dipankar@in.ibm.com> |
* |
* Based on the original work by Paul McKenney <paulmck@us.ibm.com> |
* and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. |
* Papers: |
* http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf |
* http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) |
* |
* For detailed explanation of Read-Copy Update mechanism see - |
* http://lse.sourceforge.net/locking/rcupdate.html |
* |
*/ |
#ifndef __LINUX_RCUPDATE_H |
#define __LINUX_RCUPDATE_H |
#include <linux/types.h> |
#include <linux/cache.h> |
#include <linux/spinlock.h> |
#include <linux/threads.h> |
//#include <linux/cpumask.h> |
#include <linux/seqlock.h> |
#include <linux/lockdep.h> |
#include <linux/completion.h> |
//#include <linux/debugobjects.h> |
#include <linux/bug.h> |
#include <linux/compiler.h> |
#include <asm/barrier.h> |
extern int rcu_expedited; /* for sysctl */ |
enum rcutorture_type { |
RCU_FLAVOR, |
RCU_BH_FLAVOR, |
RCU_SCHED_FLAVOR, |
RCU_TASKS_FLAVOR, |
SRCU_FLAVOR, |
INVALID_RCU_FLAVOR |
}; |
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) |
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, |
unsigned long *gpnum, unsigned long *completed); |
void rcutorture_record_test_transition(void); |
void rcutorture_record_progress(unsigned long vernum); |
void do_trace_rcu_torture_read(const char *rcutorturename, |
struct rcu_head *rhp, |
unsigned long secs, |
unsigned long c_old, |
unsigned long c); |
#else |
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type, |
int *flags, |
unsigned long *gpnum, |
unsigned long *completed) |
{ |
*flags = 0; |
*gpnum = 0; |
*completed = 0; |
} |
static inline void rcutorture_record_test_transition(void) |
{ |
} |
static inline void rcutorture_record_progress(unsigned long vernum) |
{ |
} |
#ifdef CONFIG_RCU_TRACE |
void do_trace_rcu_torture_read(const char *rcutorturename, |
struct rcu_head *rhp, |
unsigned long secs, |
unsigned long c_old, |
unsigned long c); |
#else |
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ |
do { } while (0) |
#endif |
#endif |
#define UINT_CMP_GE(a, b) (UINT_MAX / 2 >= (a) - (b)) |
#define UINT_CMP_LT(a, b) (UINT_MAX / 2 < (a) - (b)) |
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) |
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) |
#define ulong2long(a) (*(long *)(&(a))) |
/* Exported common interfaces */ |
#ifdef CONFIG_PREEMPT_RCU |
/** |
* call_rcu() - Queue an RCU callback for invocation after a grace period. |
* @head: structure to be used for queueing the RCU updates. |
* @func: actual callback function to be invoked after the grace period |
* |
* The callback function will be invoked some time after a full grace |
* period elapses, in other words after all pre-existing RCU read-side |
* critical sections have completed. However, the callback function |
* might well execute concurrently with RCU read-side critical sections |
* that started after call_rcu() was invoked. RCU read-side critical |
* sections are delimited by rcu_read_lock() and rcu_read_unlock(), |
* and may be nested. |
* |
* Note that all CPUs must agree that the grace period extended beyond |
 * all pre-existing RCU read-side critical sections. On systems with more |
* than one CPU, this means that when "func()" is invoked, each CPU is |
* guaranteed to have executed a full memory barrier since the end of its |
* last RCU read-side critical section whose beginning preceded the call |
* to call_rcu(). It also means that each CPU executing an RCU read-side |
* critical section that continues beyond the start of "func()" must have |
* executed a memory barrier after the call_rcu() but before the beginning |
* of that RCU read-side critical section. Note that these guarantees |
* include CPUs that are offline, idle, or executing in user mode, as |
* well as CPUs that are executing in the kernel. |
* |
* Furthermore, if CPU A invoked call_rcu() and CPU B invoked the |
* resulting RCU callback function "func()", then both CPU A and CPU B are |
* guaranteed to execute a full memory barrier during the time interval |
* between the call to call_rcu() and the invocation of "func()" -- even |
* if CPU A and CPU B are the same CPU (but again only if the system has |
* more than one CPU). |
*/ |
void call_rcu(struct rcu_head *head, |
void (*func)(struct rcu_head *head)); |
#else /* #ifdef CONFIG_PREEMPT_RCU */ |
/* In classic RCU, call_rcu() is just call_rcu_sched(). */ |
#define call_rcu call_rcu_sched |
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ |
/** |
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. |
* @head: structure to be used for queueing the RCU updates. |
* @func: actual callback function to be invoked after the grace period |
* |
* The callback function will be invoked some time after a full grace |
* period elapses, in other words after all currently executing RCU |
* read-side critical sections have completed. call_rcu_bh() assumes |
* that the read-side critical sections end on completion of a softirq |
* handler. This means that read-side critical sections in process |
* context must not be interrupted by softirqs. This interface is to be |
* used when most of the read-side critical sections are in softirq context. |
* RCU read-side critical sections are delimited by : |
* - rcu_read_lock() and rcu_read_unlock(), if in interrupt context. |
* OR |
* - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. |
* These may be nested. |
* |
* See the description of call_rcu() for more detailed information on |
* memory ordering guarantees. |
*/ |
void call_rcu_bh(struct rcu_head *head, |
void (*func)(struct rcu_head *head)); |
/** |
* call_rcu_sched() - Queue an RCU for invocation after sched grace period. |
* @head: structure to be used for queueing the RCU updates. |
* @func: actual callback function to be invoked after the grace period |
* |
* The callback function will be invoked some time after a full grace |
* period elapses, in other words after all currently executing RCU |
* read-side critical sections have completed. call_rcu_sched() assumes |
* that the read-side critical sections end on enabling of preemption |
* or on voluntary preemption. |
* RCU read-side critical sections are delimited by : |
* - rcu_read_lock_sched() and rcu_read_unlock_sched(), |
* OR |
* anything that disables preemption. |
* These may be nested. |
* |
* See the description of call_rcu() for more detailed information on |
* memory ordering guarantees. |
*/ |
void call_rcu_sched(struct rcu_head *head, |
void (*func)(struct rcu_head *rcu)); |
void synchronize_sched(void); |
/** |
 * call_rcu_tasks() - Queue an RCU for invocation after a task-based grace period |
* @head: structure to be used for queueing the RCU updates. |
* @func: actual callback function to be invoked after the grace period |
* |
* The callback function will be invoked some time after a full grace |
* period elapses, in other words after all currently executing RCU |
* read-side critical sections have completed. call_rcu_tasks() assumes |
* that the read-side critical sections end at a voluntary context |
* switch (not a preemption!), entry into idle, or transition to usermode |
* execution. As such, there are no read-side primitives analogous to |
* rcu_read_lock() and rcu_read_unlock() because this primitive is intended |
* to determine that all tasks have passed through a safe state, not so |
 * much for data-structure synchronization. |
* |
* See the description of call_rcu() for more detailed information on |
* memory ordering guarantees. |
*/ |
void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head)); |
void synchronize_rcu_tasks(void); |
void rcu_barrier_tasks(void); |
#ifdef CONFIG_PREEMPT_RCU |
void __rcu_read_lock(void); |
void __rcu_read_unlock(void); |
void rcu_read_unlock_special(struct task_struct *t); |
void synchronize_rcu(void); |
/* |
* Defined as a macro as it is a very low level header included from |
* areas that don't even know about current. This gives the rcu_read_lock() |
* nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other |
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable. |
*/ |
#define rcu_preempt_depth() (current->rcu_read_lock_nesting) |
#else /* #ifdef CONFIG_PREEMPT_RCU */ |
static inline void __rcu_read_lock(void) |
{ |
preempt_disable(); |
} |
static inline void __rcu_read_unlock(void) |
{ |
preempt_enable(); |
} |
static inline void synchronize_rcu(void) |
{ |
synchronize_sched(); |
} |
static inline int rcu_preempt_depth(void) |
{ |
return 0; |
} |
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */ |
/* Internal to kernel */ |
void rcu_init(void); |
void rcu_sched_qs(void); |
void rcu_bh_qs(void); |
void rcu_check_callbacks(int user); |
struct notifier_block; |
void rcu_idle_enter(void); |
void rcu_idle_exit(void); |
void rcu_irq_enter(void); |
void rcu_irq_exit(void); |
#ifdef CONFIG_RCU_STALL_COMMON |
void rcu_sysrq_start(void); |
void rcu_sysrq_end(void); |
#else /* #ifdef CONFIG_RCU_STALL_COMMON */ |
static inline void rcu_sysrq_start(void) |
{ |
} |
static inline void rcu_sysrq_end(void) |
{ |
} |
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ |
#ifdef CONFIG_RCU_USER_QS |
void rcu_user_enter(void); |
void rcu_user_exit(void); |
#else |
static inline void rcu_user_enter(void) { } |
static inline void rcu_user_exit(void) { } |
static inline void rcu_user_hooks_switch(struct task_struct *prev, |
struct task_struct *next) { } |
#endif /* CONFIG_RCU_USER_QS */ |
#ifdef CONFIG_RCU_NOCB_CPU |
void rcu_init_nohz(void); |
#else /* #ifdef CONFIG_RCU_NOCB_CPU */ |
static inline void rcu_init_nohz(void) |
{ |
} |
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ |
/** |
* RCU_NONIDLE - Indicate idle-loop code that needs RCU readers |
* @a: Code that RCU needs to pay attention to. |
* |
* RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden |
* in the inner idle loop, that is, between the rcu_idle_enter() and |
* the rcu_idle_exit() -- RCU will happily ignore any such read-side |
* critical sections. However, things like powertop need tracepoints |
* in the inner idle loop. |
* |
* This macro provides the way out: RCU_NONIDLE(do_something_with_RCU()) |
 * will tell RCU that it needs to pay attention, invoke its argument |
* (in this example, a call to the do_something_with_RCU() function), |
* and then tell RCU to go back to ignoring this CPU. It is permissible |
* to nest RCU_NONIDLE() wrappers, but the nesting level is currently |
* quite limited. If deeper nesting is required, it will be necessary |
* to adjust DYNTICK_TASK_NESTING_VALUE accordingly. |
*/ |
#define RCU_NONIDLE(a) \ |
do { \ |
rcu_irq_enter(); \ |
do { a; } while (0); \ |
rcu_irq_exit(); \ |
} while (0) |
/* |
* Note a voluntary context switch for RCU-tasks benefit. This is a |
* macro rather than an inline function to avoid #include hell. |
*/ |
#ifdef CONFIG_TASKS_RCU |
#define TASKS_RCU(x) x |
extern struct srcu_struct tasks_rcu_exit_srcu; |
#define rcu_note_voluntary_context_switch(t) \ |
do { \ |
if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \ |
ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \ |
} while (0) |
#else /* #ifdef CONFIG_TASKS_RCU */ |
#define TASKS_RCU(x) do { } while (0) |
#define rcu_note_voluntary_context_switch(t) do { } while (0) |
#endif /* #else #ifdef CONFIG_TASKS_RCU */ |
/** |
* cond_resched_rcu_qs - Report potential quiescent states to RCU |
* |
* This macro resembles cond_resched(), except that it is defined to |
* report potential quiescent states to RCU-tasks even if the cond_resched() |
* machinery were to be shut off, as some advocate for PREEMPT kernels. |
*/ |
#define cond_resched_rcu_qs() \ |
do { \ |
if (!cond_resched()) \ |
rcu_note_voluntary_context_switch(current); \ |
} while (0) |
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) |
bool __rcu_is_watching(void); |
#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ |
/* |
* Infrastructure to implement the synchronize_() primitives in |
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU. |
*/ |
typedef void call_rcu_func_t(struct rcu_head *head, |
void (*func)(struct rcu_head *head)); |
void wait_rcu_gp(call_rcu_func_t crf); |
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) |
#include <linux/rcutree.h> |
#elif defined(CONFIG_TINY_RCU) |
#include <linux/rcutiny.h> |
#else |
#error "Unknown RCU implementation specified to kernel configuration" |
#endif |
/* |
* init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic |
* initialization and destruction of rcu_head on the stack. rcu_head structures |
* allocated dynamically in the heap or defined statically don't need any |
* initialization. |
*/ |
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD |
void init_rcu_head(struct rcu_head *head); |
void destroy_rcu_head(struct rcu_head *head); |
void init_rcu_head_on_stack(struct rcu_head *head); |
void destroy_rcu_head_on_stack(struct rcu_head *head); |
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
static inline void init_rcu_head(struct rcu_head *head) |
{ |
} |
static inline void destroy_rcu_head(struct rcu_head *head) |
{ |
} |
static inline void init_rcu_head_on_stack(struct rcu_head *head) |
{ |
} |
static inline void destroy_rcu_head_on_stack(struct rcu_head *head) |
{ |
} |
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) |
bool rcu_lockdep_current_cpu_online(void); |
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ |
static inline bool rcu_lockdep_current_cpu_online(void) |
{ |
return true; |
} |
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
static inline void rcu_lock_acquire(struct lockdep_map *map) |
{ |
lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); |
} |
static inline void rcu_lock_release(struct lockdep_map *map) |
{ |
lock_release(map, 1, _THIS_IP_); |
} |
extern struct lockdep_map rcu_lock_map; |
extern struct lockdep_map rcu_bh_lock_map; |
extern struct lockdep_map rcu_sched_lock_map; |
extern struct lockdep_map rcu_callback_map; |
int debug_lockdep_rcu_enabled(void); |
int rcu_read_lock_held(void); |
int rcu_read_lock_bh_held(void); |
/** |
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? |
* |
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an |
* RCU-sched read-side critical section. In absence of |
* CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side |
* critical section unless it can prove otherwise. Note that disabling |
* of preemption (including disabling irqs) counts as an RCU-sched |
* read-side critical section. This is useful for debug checks in functions |
 * that require that they be called within an RCU-sched read-side |
* critical section. |
* |
* Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
* and while lockdep is disabled. |
* |
* Note that if the CPU is in the idle loop from an RCU point of |
* view (ie: that we are in the section between rcu_idle_enter() and |
* rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU |
* did an rcu_read_lock(). The reason for this is that RCU ignores CPUs |
* that are in such a section, considering these as in extended quiescent |
* state, so such a CPU is effectively never in an RCU read-side critical |
* section regardless of what RCU primitives it invokes. This state of |
* affairs is required --- we need to keep an RCU-free window in idle |
* where the CPU may possibly enter into low power mode. This way we can |
 * report an extended quiescent state to other CPUs that started a grace |
* period. Otherwise we would delay any grace period as long as we run in |
* the idle task. |
* |
* Similarly, we avoid claiming an SRCU read lock held if the current |
* CPU is offline. |
*/ |
#ifdef CONFIG_PREEMPT_COUNT |
static inline int rcu_read_lock_sched_held(void) |
{ |
int lockdep_opinion = 0; |
if (!debug_lockdep_rcu_enabled()) |
return 1; |
if (!rcu_is_watching()) |
return 0; |
if (!rcu_lockdep_current_cpu_online()) |
return 0; |
if (debug_locks) |
lockdep_opinion = lock_is_held(&rcu_sched_lock_map); |
return lockdep_opinion || preempt_count() != 0 || irqs_disabled(); |
} |
#else /* #ifdef CONFIG_PREEMPT_COUNT */ |
static inline int rcu_read_lock_sched_held(void) |
{ |
return 1; |
} |
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */ |
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
# define rcu_lock_acquire(a) do { } while (0) |
# define rcu_lock_release(a) do { } while (0) |
static inline int rcu_read_lock_held(void) |
{ |
return 1; |
} |
static inline int rcu_read_lock_bh_held(void) |
{ |
return 1; |
} |
#ifdef CONFIG_PREEMPT_COUNT |
static inline int rcu_read_lock_sched_held(void) |
{ |
return preempt_count() != 0 || irqs_disabled(); |
} |
#else /* #ifdef CONFIG_PREEMPT_COUNT */ |
static inline int rcu_read_lock_sched_held(void) |
{ |
return 1; |
} |
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */ |
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
#ifdef CONFIG_PROVE_RCU |
/** |
* rcu_lockdep_assert - emit lockdep splat if specified condition not met |
* @c: condition to check |
* @s: informative message |
*/ |
#define rcu_lockdep_assert(c, s) \ |
do { \ |
static bool __section(.data.unlikely) __warned; \ |
if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ |
__warned = true; \ |
lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ |
} \ |
} while (0) |
#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) |
static inline void rcu_preempt_sleep_check(void) |
{ |
rcu_lockdep_assert(!lock_is_held(&rcu_lock_map), |
"Illegal context switch in RCU read-side critical section"); |
} |
#else /* #ifdef CONFIG_PROVE_RCU */ |
static inline void rcu_preempt_sleep_check(void) |
{ |
} |
#endif /* #else #ifdef CONFIG_PROVE_RCU */ |
#define rcu_sleep_check() \ |
do { \ |
rcu_preempt_sleep_check(); \ |
rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map), \ |
"Illegal context switch in RCU-bh read-side critical section"); \ |
rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map), \ |
"Illegal context switch in RCU-sched read-side critical section"); \ |
} while (0) |
#else /* #ifdef CONFIG_PROVE_RCU */ |
#define rcu_lockdep_assert(c, s) do { } while (0) |
#define rcu_sleep_check() do { } while (0) |
#endif /* #else #ifdef CONFIG_PROVE_RCU */ |
/* |
* Helper functions for rcu_dereference_check(), rcu_dereference_protected() |
* and rcu_assign_pointer(). Some of these could be folded into their |
* callers, but they are left separate in order to ease introduction of |
* multiple flavors of pointers to match the multiple flavors of RCU |
 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in |
* the future. |
*/ |
#ifdef __CHECKER__ |
#define rcu_dereference_sparse(p, space) \ |
((void)(((typeof(*p) space *)p) == p)) |
#else /* #ifdef __CHECKER__ */ |
#define rcu_dereference_sparse(p, space) |
#endif /* #else #ifdef __CHECKER__ */ |
#define __rcu_access_pointer(p, space) \ |
({ \ |
typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ |
rcu_dereference_sparse(p, space); \ |
((typeof(*p) __force __kernel *)(_________p1)); \ |
}) |
#define __rcu_dereference_check(p, c, space) \ |
({ \ |
typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \ |
rcu_lockdep_assert(c, "suspicious rcu_dereference_check() usage"); \ |
rcu_dereference_sparse(p, space); \ |
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ |
((typeof(*p) __force __kernel *)(_________p1)); \ |
}) |
#define __rcu_dereference_protected(p, c, space) \ |
({ \ |
rcu_lockdep_assert(c, "suspicious rcu_dereference_protected() usage"); \ |
rcu_dereference_sparse(p, space); \ |
((typeof(*p) __force __kernel *)(p)); \ |
}) |
#define __rcu_access_index(p, space) \ |
({ \ |
typeof(p) _________p1 = ACCESS_ONCE(p); \ |
rcu_dereference_sparse(p, space); \ |
(_________p1); \ |
}) |
#define __rcu_dereference_index_check(p, c) \ |
({ \ |
typeof(p) _________p1 = ACCESS_ONCE(p); \ |
rcu_lockdep_assert(c, \ |
"suspicious rcu_dereference_index_check() usage"); \ |
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ |
(_________p1); \ |
}) |
/** |
* RCU_INITIALIZER() - statically initialize an RCU-protected global variable |
* @v: The value to statically initialize with. |
*/ |
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) |
/** |
* lockless_dereference() - safely load a pointer for later dereference |
* @p: The pointer to load |
* |
* Similar to rcu_dereference(), but for situations where the pointed-to |
* object's lifetime is managed by something other than RCU. That |
* "something other" might be reference counting or simple immortality. |
*/ |
#define lockless_dereference(p) \ |
({ \ |
typeof(p) _________p1 = ACCESS_ONCE(p); \ |
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ |
(_________p1); \ |
}) |
/** |
* rcu_assign_pointer() - assign to RCU-protected pointer |
* @p: pointer to assign to |
* @v: value to assign (publish) |
* |
* Assigns the specified value to the specified RCU-protected |
* pointer, ensuring that any concurrent RCU readers will see |
* any prior initialization. |
* |
* Inserts memory barriers on architectures that require them |
* (which is most of them), and also prevents the compiler from |
* reordering the code that initializes the structure after the pointer |
* assignment. More importantly, this call documents which pointers |
* will be dereferenced by RCU read-side code. |
* |
* In some special cases, you may use RCU_INIT_POINTER() instead |
* of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due |
* to the fact that it does not constrain either the CPU or the compiler. |
* That said, using RCU_INIT_POINTER() when you should have used |
* rcu_assign_pointer() is a very bad thing that results in |
* impossible-to-diagnose memory corruption. So please be careful. |
* See the RCU_INIT_POINTER() comment header for details. |
* |
* Note that rcu_assign_pointer() evaluates each of its arguments only |
* once, appearances notwithstanding. One of the "extra" evaluations |
* is in typeof() and the other visible only to sparse (__CHECKER__), |
* neither of which actually execute the argument. As with most cpp |
* macros, this execute-arguments-only-once property is important, so |
* please be careful when making changes to rcu_assign_pointer() and the |
* other macros that it invokes. |
*/ |
#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v)) |
/** |
* rcu_access_pointer() - fetch RCU pointer with no dereferencing |
* @p: The pointer to read |
* |
* Return the value of the specified RCU-protected pointer, but omit the |
* smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful |
* when the value of this pointer is accessed, but the pointer is not |
* dereferenced, for example, when testing an RCU-protected pointer against |
* NULL. Although rcu_access_pointer() may also be used in cases where |
* update-side locks prevent the value of the pointer from changing, you |
* should instead use rcu_dereference_protected() for this use case. |
* |
* It is also permissible to use rcu_access_pointer() when read-side |
* access to the pointer was removed at least one grace period ago, as |
* is the case in the context of the RCU callback that is freeing up |
* the data, or after a synchronize_rcu() returns. This can be useful |
* when tearing down multi-linked structures after a grace period |
* has elapsed. |
*/ |
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu) |
/** |
* rcu_dereference_check() - rcu_dereference with debug checking |
* @p: The pointer to read, prior to dereferencing |
* @c: The conditions under which the dereference will take place |
* |
* Do an rcu_dereference(), but check that the conditions under which the |
* dereference will take place are correct. Typically the conditions |
* indicate the various locking conditions that should be held at that |
* point. The check should return true if the conditions are satisfied. |
* An implicit check for being in an RCU read-side critical section |
* (rcu_read_lock()) is included. |
* |
* For example: |
* |
* bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock)); |
* |
* could be used to indicate to lockdep that foo->bar may only be dereferenced |
* if either rcu_read_lock() is held, or that the lock required to replace |
* the bar struct at foo->bar is held. |
* |
* Note that the list of conditions may also include indications of when a lock |
* need not be held, for example during initialisation or destruction of the |
* target struct: |
* |
* bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) || |
* atomic_read(&foo->usage) == 0); |
* |
* Inserts memory barriers on architectures that require them |
* (currently only the Alpha), prevents the compiler from refetching |
* (and from merging fetches), and, more importantly, documents exactly |
* which pointers are protected by RCU and checks that the pointer is |
* annotated as __rcu. |
*/ |
#define rcu_dereference_check(p, c) \ |
__rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu) |
/** |
* rcu_dereference_bh_check() - rcu_dereference_bh with debug checking |
* @p: The pointer to read, prior to dereferencing |
* @c: The conditions under which the dereference will take place |
* |
* This is the RCU-bh counterpart to rcu_dereference_check(). |
*/ |
#define rcu_dereference_bh_check(p, c) \ |
__rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu) |
/** |
* rcu_dereference_sched_check() - rcu_dereference_sched with debug checking |
* @p: The pointer to read, prior to dereferencing |
* @c: The conditions under which the dereference will take place |
* |
* This is the RCU-sched counterpart to rcu_dereference_check(). |
*/ |
#define rcu_dereference_sched_check(p, c) \ |
__rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \ |
__rcu) |
#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/ |
/* |
* The tracing infrastructure traces RCU (we want that), but unfortunately |
* some of the RCU checks causes tracing to lock up the system. |
* |
* The tracing version of rcu_dereference_raw() must not call |
* rcu_read_lock_held(). |
*/ |
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) |
/** |
* rcu_access_index() - fetch RCU index with no dereferencing |
* @p: The index to read |
* |
* Return the value of the specified RCU-protected index, but omit the |
* smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful |
* when the value of this index is accessed, but the index is not |
* dereferenced, for example, when testing an RCU-protected index against |
* -1. Although rcu_access_index() may also be used in cases where |
* update-side locks prevent the value of the index from changing, you |
* should instead use rcu_dereference_index_protected() for this use case. |
*/ |
#define rcu_access_index(p) __rcu_access_index((p), __rcu) |
/** |
* rcu_dereference_index_check() - rcu_dereference for indices with debug checking |
* @p: The pointer to read, prior to dereferencing |
* @c: The conditions under which the dereference will take place |
* |
* Similar to rcu_dereference_check(), but omits the sparse checking. |
* This allows rcu_dereference_index_check() to be used on integers, |
* which can then be used as array indices. Attempting to use |
* rcu_dereference_check() on an integer will give compiler warnings |
* because the sparse address-space mechanism relies on dereferencing |
* the RCU-protected pointer. Dereferencing integers is not something |
* that even gcc will put up with. |
* |
* Note that this function does not implicitly check for RCU read-side |
* critical sections. If this function gains lots of uses, it might |
* make sense to provide versions for each flavor of RCU, but it does |
* not make sense as of early 2010. |
*/ |
#define rcu_dereference_index_check(p, c) \ |
__rcu_dereference_index_check((p), (c)) |
/** |
* rcu_dereference_protected() - fetch RCU pointer when updates prevented |
* @p: The pointer to read, prior to dereferencing |
* @c: The conditions under which the dereference will take place |
* |
* Return the value of the specified RCU-protected pointer, but omit |
* both the smp_read_barrier_depends() and the ACCESS_ONCE(). This |
* is useful in cases where update-side locks prevent the value of the |
* pointer from changing. Please note that this primitive does -not- |
* prevent the compiler from repeating this reference or combining it |
* with other references, so it should not be used without protection |
* of appropriate locks. |
* |
* This function is only for update-side use. Using this function |
* when protected only by rcu_read_lock() will result in infrequent |
* but very ugly failures. |
*/ |
#define rcu_dereference_protected(p, c) \ |
__rcu_dereference_protected((p), (c), __rcu) |
/** |
* rcu_dereference() - fetch RCU-protected pointer for dereferencing |
* @p: The pointer to read, prior to dereferencing |
* |
* This is a simple wrapper around rcu_dereference_check(). |
*/ |
#define rcu_dereference(p) rcu_dereference_check(p, 0) |
/** |
* rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing |
* @p: The pointer to read, prior to dereferencing |
* |
* Makes rcu_dereference_check() do the dirty work. |
*/ |
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0) |
/** |
* rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing |
* @p: The pointer to read, prior to dereferencing |
* |
* Makes rcu_dereference_check() do the dirty work. |
*/ |
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) |
/** |
* rcu_read_lock() - mark the beginning of an RCU read-side critical section |
* |
* When synchronize_rcu() is invoked on one CPU while other CPUs |
* are within RCU read-side critical sections, then the |
* synchronize_rcu() is guaranteed to block until after all the other |
* CPUs exit their critical sections. Similarly, if call_rcu() is invoked |
* on one CPU while other CPUs are within RCU read-side critical |
* sections, invocation of the corresponding RCU callback is deferred |
 * until after all the other CPUs exit their critical sections. |
* |
* Note, however, that RCU callbacks are permitted to run concurrently |
* with new RCU read-side critical sections. One way that this can happen |
* is via the following sequence of events: (1) CPU 0 enters an RCU |
* read-side critical section, (2) CPU 1 invokes call_rcu() to register |
* an RCU callback, (3) CPU 0 exits the RCU read-side critical section, |
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU |
* callback is invoked. This is legal, because the RCU read-side critical |
* section that was running concurrently with the call_rcu() (and which |
* therefore might be referencing something that the corresponding RCU |
* callback would free up) has completed before the corresponding |
* RCU callback is invoked. |
* |
* RCU read-side critical sections may be nested. Any deferred actions |
* will be deferred until the outermost RCU read-side critical section |
* completes. |
* |
* You can avoid reading and understanding the next paragraph by |
* following this rule: don't put anything in an rcu_read_lock() RCU |
* read-side critical section that would block in a !PREEMPT kernel. |
* But if you want the full story, read on! |
* |
* In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), |
* it is illegal to block while in an RCU read-side critical section. |
* In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT |
* kernel builds, RCU read-side critical sections may be preempted, |
* but explicit blocking is illegal. Finally, in preemptible RCU |
* implementations in real-time (with -rt patchset) kernel builds, RCU |
* read-side critical sections may be preempted and they may also block, but |
* only when acquiring spinlocks that are subject to priority inheritance. |
*/ |
static inline void rcu_read_lock(void) |
{ |
__rcu_read_lock(); |
__acquire(RCU); |
rcu_lock_acquire(&rcu_lock_map); |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_lock() used illegally while idle"); |
} |
/* |
* So where is rcu_write_lock()? It does not exist, as there is no |
* way for writers to lock out RCU readers. This is a feature, not |
* a bug -- this property is what provides RCU's performance benefits. |
* Of course, writers must coordinate with each other. The normal |
* spinlock primitives work well for this, but any other technique may be |
* used as well. RCU does not care how the writers keep out of each |
* others' way, as long as they do so. |
*/ |
/** |
* rcu_read_unlock() - marks the end of an RCU read-side critical section. |
* |
* In most situations, rcu_read_unlock() is immune from deadlock. |
* However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock() |
* is responsible for deboosting, which it does via rt_mutex_unlock(). |
* Unfortunately, this function acquires the scheduler's runqueue and |
* priority-inheritance spinlocks. This means that deadlock could result |
* if the caller of rcu_read_unlock() already holds one of these locks or |
* any lock that is ever acquired while holding them; or any lock which |
* can be taken from interrupt context because rcu_boost()->rt_mutex_lock() |
* does not disable irqs while taking ->wait_lock. |
* |
* That said, RCU readers are never priority boosted unless they were |
* preempted. Therefore, one way to avoid deadlock is to make sure |
* that preemption never happens within any RCU read-side critical |
* section whose outermost rcu_read_unlock() is called with one of |
* rt_mutex_unlock()'s locks held. Such preemption can be avoided in |
* a number of ways, for example, by invoking preempt_disable() before |
* critical section's outermost rcu_read_lock(). |
* |
* Given that the set of locks acquired by rt_mutex_unlock() might change |
* at any time, a somewhat more future-proofed approach is to make sure |
 * that preemption never happens within any RCU read-side critical |
* section whose outermost rcu_read_unlock() is called with irqs disabled. |
* This approach relies on the fact that rt_mutex_unlock() currently only |
* acquires irq-disabled locks. |
* |
* The second of these two approaches is best in most situations, |
* however, the first approach can also be useful, at least to those |
* developers willing to keep abreast of the set of locks acquired by |
* rt_mutex_unlock(). |
* |
* See rcu_read_lock() for more information. |
*/ |
static inline void rcu_read_unlock(void) |
{ |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_unlock() used illegally while idle"); |
rcu_lock_release(&rcu_lock_map); |
__release(RCU); |
__rcu_read_unlock(); |
} |
/** |
* rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section |
* |
 * This is the equivalent of rcu_read_lock(), but to be used when updates |
* are being done using call_rcu_bh() or synchronize_rcu_bh(). Since |
* both call_rcu_bh() and synchronize_rcu_bh() consider completion of a |
* softirq handler to be a quiescent state, a process in RCU read-side |
* critical section must be protected by disabling softirqs. Read-side |
* critical sections in interrupt context can use just rcu_read_lock(), |
* though this should at least be commented to avoid confusing people |
* reading the code. |
* |
* Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() |
* must occur in the same context, for example, it is illegal to invoke |
* rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() |
* was invoked from some other task. |
*/ |
static inline void rcu_read_lock_bh(void) |
{ |
local_bh_disable(); |
__acquire(RCU_BH); |
rcu_lock_acquire(&rcu_bh_lock_map); |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_lock_bh() used illegally while idle"); |
} |
/* |
* rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section |
* |
* See rcu_read_lock_bh() for more information. |
*/ |
static inline void rcu_read_unlock_bh(void) |
{ |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_unlock_bh() used illegally while idle"); |
rcu_lock_release(&rcu_bh_lock_map); |
__release(RCU_BH); |
local_bh_enable(); |
} |
/** |
 * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section |
 * |
 * This is the equivalent of rcu_read_lock(), but to be used when updates |
* are being done using call_rcu_sched() or synchronize_rcu_sched(). |
* Read-side critical sections can also be introduced by anything that |
* disables preemption, including local_irq_disable() and friends. |
* |
* Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() |
* must occur in the same context, for example, it is illegal to invoke |
* rcu_read_unlock_sched() from process context if the matching |
* rcu_read_lock_sched() was invoked from an NMI handler. |
*/ |
static inline void rcu_read_lock_sched(void) |
{ |
preempt_disable(); |
__acquire(RCU_SCHED); |
rcu_lock_acquire(&rcu_sched_lock_map); |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_lock_sched() used illegally while idle"); |
} |
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ |
static inline notrace void rcu_read_lock_sched_notrace(void) |
{ |
preempt_disable_notrace(); |
__acquire(RCU_SCHED); |
} |
/* |
* rcu_read_unlock_sched - marks the end of a RCU-classic critical section |
* |
* See rcu_read_lock_sched for more information. |
*/ |
static inline void rcu_read_unlock_sched(void) |
{ |
rcu_lockdep_assert(rcu_is_watching(), |
"rcu_read_unlock_sched() used illegally while idle"); |
rcu_lock_release(&rcu_sched_lock_map); |
__release(RCU_SCHED); |
preempt_enable(); |
} |
/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ |
static inline notrace void rcu_read_unlock_sched_notrace(void) |
{ |
__release(RCU_SCHED); |
preempt_enable_notrace(); |
} |
/** |
* RCU_INIT_POINTER() - initialize an RCU protected pointer |
* |
* Initialize an RCU-protected pointer in special cases where readers |
* do not need ordering constraints on the CPU or the compiler. These |
* special cases are: |
* |
* 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or- |
* 2. The caller has taken whatever steps are required to prevent |
* RCU readers from concurrently accessing this pointer -or- |
* 3. The referenced data structure has already been exposed to |
* readers either at compile time or via rcu_assign_pointer() -and- |
* a. You have not made -any- reader-visible changes to |
* this structure since then -or- |
* b. It is OK for readers accessing this structure from its |
* new location to see the old state of the structure. (For |
* example, the changes were to statistical counters or to |
* other state where exact synchronization is not required.) |
* |
* Failure to follow these rules governing use of RCU_INIT_POINTER() will |
* result in impossible-to-diagnose memory corruption. As in the structures |
* will look OK in crash dumps, but any concurrent RCU readers might |
* see pre-initialized values of the referenced data structure. So |
* please be very careful how you use RCU_INIT_POINTER()!!! |
* |
* If you are creating an RCU-protected linked structure that is accessed |
* by a single external-to-structure RCU-protected pointer, then you may |
* use RCU_INIT_POINTER() to initialize the internal RCU-protected |
* pointers, but you must use rcu_assign_pointer() to initialize the |
* external-to-structure pointer -after- you have completely initialized |
* the reader-accessible portions of the linked structure. |
* |
* Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no |
* ordering guarantees for either the CPU or the compiler. |
*/ |
#define RCU_INIT_POINTER(p, v) \ |
do { \ |
rcu_dereference_sparse(p, __rcu); \ |
p = RCU_INITIALIZER(v); \ |
} while (0) |
/** |
* RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer |
* |
* GCC-style initialization for an RCU-protected pointer in a structure field. |
*/ |
#define RCU_POINTER_INITIALIZER(p, v) \ |
.p = RCU_INITIALIZER(v) |
/* |
* Does the specified offset indicate that the corresponding rcu_head |
* structure can be handled by kfree_rcu()? |
*/ |
#define __is_kfree_rcu_offset(offset) ((offset) < 4096) |
/* |
* Helper macro for kfree_rcu() to prevent argument-expansion eyestrain. |
*/ |
#define __kfree_rcu(head, offset) \ |
do { \ |
BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \ |
kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \ |
} while (0) |
/** |
* kfree_rcu() - kfree an object after a grace period. |
* @ptr: pointer to kfree |
* @rcu_head: the name of the struct rcu_head within the type of @ptr. |
* |
 * Many rcu callback functions just call kfree() on the base structure. |
* These functions are trivial, but their size adds up, and furthermore |
* when they are used in a kernel module, that module must invoke the |
* high-latency rcu_barrier() function at module-unload time. |
* |
* The kfree_rcu() function handles this issue. Rather than encoding a |
* function address in the embedded rcu_head structure, kfree_rcu() instead |
* encodes the offset of the rcu_head structure within the base structure. |
* Because the functions are not allowed in the low-order 4096 bytes of |
* kernel virtual memory, offsets up to 4095 bytes can be accommodated. |
* If the offset is larger than 4095 bytes, a compile-time error will |
* be generated in __kfree_rcu(). If this error is triggered, you can |
* either fall back to use of call_rcu() or rearrange the structure to |
* position the rcu_head structure into the first 4096 bytes. |
* |
* Note that the allowable offset might decrease in the future, for example, |
* to allow something like kmem_cache_free_rcu(). |
* |
* The BUILD_BUG_ON check must not involve any function calls, hence the |
* checks are done in macros here. |
*/ |
#define kfree_rcu(ptr, rcu_head) \ |
__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) |
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) |
static inline int rcu_needs_cpu(unsigned long *delta_jiffies) |
{ |
*delta_jiffies = ULONG_MAX; |
return 0; |
} |
#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */ |
#if defined(CONFIG_RCU_NOCB_CPU_ALL) |
static inline bool rcu_is_nocb_cpu(int cpu) { return true; } |
#elif defined(CONFIG_RCU_NOCB_CPU) |
bool rcu_is_nocb_cpu(int cpu); |
#else |
static inline bool rcu_is_nocb_cpu(int cpu) { return false; } |
#endif |
/* Only for use by adaptive-ticks code. */ |
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE |
bool rcu_sys_is_idle(void); |
void rcu_sysidle_force_exit(void); |
#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ |
static inline bool rcu_sys_is_idle(void) |
{ |
return false; |
} |
static inline void rcu_sysidle_force_exit(void) |
{ |
} |
#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ |
#endif /* __LINUX_RCUPDATE_H */ |
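
Taken together, the primitives in this header follow one small pattern: fully initialize the new object, publish it with rcu_assign_pointer(), read it under rcu_read_lock() via rcu_dereference(), and defer the free past a grace period. A sketch under stated assumptions; `struct foo`, `gp` and `my_lock` are hypothetical.

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int a, b;
	struct rcu_head rcu;
};

static struct foo __rcu *gp;		/* RCU-protected global pointer */
static DEFINE_SPINLOCK(my_lock);	/* serializes updaters */

/* Updater: initialize fully, then publish; free the old copy only
 * after a grace period. */
static int update_foo(int a, int b)
{
	struct foo *new = kmalloc(sizeof(*new), GFP_KERNEL), *old;

	if (!new)
		return -ENOMEM;
	new->a = a;
	new->b = b;			/* all init precedes publication */

	spin_lock(&my_lock);
	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
	rcu_assign_pointer(gp, new);	/* readers now see a complete foo */
	spin_unlock(&my_lock);

	if (old)
		kfree_rcu(old, rcu);	/* kfree() after readers drop it */
	return 0;
}

/* Reader: the pointer and its fields are only stable inside the
 * critical section. */
static int read_foo(int *a, int *b)
{
	struct foo *p;
	int ret = -ENOENT;

	rcu_read_lock();
	p = rcu_dereference(gp);	/* dependency-ordered load */
	if (p) {
		*a = p->a;
		*b = p->b;		/* must not touch p after unlock */
		ret = 0;
	}
	rcu_read_unlock();
	return ret;
}
```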
/drivers/include/linux/rcutiny.h |
---|
0,0 → 1,160 |
/* |
* Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License as published by |
* the Free Software Foundation; either version 2 of the License, or |
* (at your option) any later version. |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, you can access it online at |
* http://www.gnu.org/licenses/gpl-2.0.html. |
* |
* Copyright IBM Corporation, 2008 |
* |
* Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> |
* |
* For detailed explanation of Read-Copy Update mechanism see - |
* Documentation/RCU |
*/ |
#ifndef __LINUX_TINY_H |
#define __LINUX_TINY_H |
#include <linux/cache.h> |
static inline unsigned long get_state_synchronize_rcu(void) |
{ |
return 0; |
} |
static inline void cond_synchronize_rcu(unsigned long oldstate) |
{ |
might_sleep(); |
} |
static inline void rcu_barrier_bh(void) |
{ |
wait_rcu_gp(call_rcu_bh); |
} |
static inline void rcu_barrier_sched(void) |
{ |
wait_rcu_gp(call_rcu_sched); |
} |
static inline void synchronize_rcu_expedited(void) |
{ |
synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ |
} |
static inline void rcu_barrier(void) |
{ |
rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */ |
} |
static inline void synchronize_rcu_bh(void) |
{ |
synchronize_sched(); |
} |
static inline void synchronize_rcu_bh_expedited(void) |
{ |
synchronize_sched(); |
} |
static inline void synchronize_sched_expedited(void) |
{ |
synchronize_sched(); |
} |
static inline void kfree_call_rcu(struct rcu_head *head, |
void (*func)(struct rcu_head *rcu)) |
{ |
call_rcu(head, func); |
} |
static inline void rcu_note_context_switch(void) |
{ |
rcu_sched_qs(); |
} |
/* |
* Take advantage of the fact that there is only one CPU, which |
* allows us to ignore virtualization-based context switches. |
*/ |
static inline void rcu_virt_note_context_switch(int cpu) |
{ |
} |
/* |
* Return the number of grace periods. |
*/ |
static inline long rcu_batches_completed(void) |
{ |
return 0; |
} |
/* |
* Return the number of bottom-half grace periods. |
*/ |
static inline long rcu_batches_completed_bh(void) |
{ |
return 0; |
} |
static inline void rcu_force_quiescent_state(void) |
{ |
} |
static inline void rcu_bh_force_quiescent_state(void) |
{ |
} |
static inline void rcu_sched_force_quiescent_state(void) |
{ |
} |
static inline void show_rcu_gp_kthreads(void) |
{ |
} |
static inline void rcu_cpu_stall_reset(void) |
{ |
} |
static inline void exit_rcu(void) |
{ |
} |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
extern int rcu_scheduler_active __read_mostly; |
void rcu_scheduler_starting(void); |
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
static inline void rcu_scheduler_starting(void) |
{ |
} |
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) |
static inline bool rcu_is_watching(void) |
{ |
return __rcu_is_watching(); |
} |
#else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ |
static inline bool rcu_is_watching(void) |
{ |
return true; |
} |
#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */ |
#endif /* __LINUX_RCUTINY_H */ |
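
One caller-side consequence of the callback machinery (including the rcu_barrier() variants stubbed above) is worth a sketch: a module that queues call_rcu() callbacks must flush them before its code and data go away. `my_cache` is hypothetical here, and the objects are assumed to be freed from an RCU callback.

```c
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct kmem_cache *my_cache;	/* objects freed via call_rcu() */

static void __exit my_exit(void)
{
	/* No new call_rcu() invocations can happen at this point; wait
	 * for the callbacks already queued, then it is safe to tear
	 * down their backing store and unload the callback code. */
	rcu_barrier();
	kmem_cache_destroy(my_cache);
}
module_exit(my_exit);
```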
/drivers/include/linux/reservation.h |
---|
40,23 → 40,103 |
#define _LINUX_RESERVATION_H |
#include <linux/ww_mutex.h> |
#include <linux/fence.h> |
#include <linux/slab.h> |
#include <linux/seqlock.h> |
#include <linux/rcupdate.h> |
extern struct ww_class reservation_ww_class; |
extern struct lock_class_key reservation_seqcount_class; |
extern const char reservation_seqcount_string[]; |
struct reservation_object_list { |
struct rcu_head rcu; |
u32 shared_count, shared_max; |
struct fence __rcu *shared[]; |
}; |
struct reservation_object { |
struct ww_mutex lock; |
seqcount_t seq; |
struct fence __rcu *fence_excl; |
struct reservation_object_list __rcu *fence; |
struct reservation_object_list *staged; |
}; |
#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) |
#define reservation_object_assert_held(obj) \ |
lockdep_assert_held(&(obj)->lock.base) |
static inline void |
reservation_object_init(struct reservation_object *obj) |
{ |
ww_mutex_init(&obj->lock, &reservation_ww_class); |
__seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); |
RCU_INIT_POINTER(obj->fence, NULL); |
RCU_INIT_POINTER(obj->fence_excl, NULL); |
obj->staged = NULL; |
} |
static inline void |
reservation_object_fini(struct reservation_object *obj) |
{ |
int i; |
struct reservation_object_list *fobj; |
struct fence *excl; |
/* |
* This object should be dead and all references must have |
* been released to it, so no need to be protected with rcu. |
*/ |
excl = rcu_dereference_protected(obj->fence_excl, 1); |
if (excl) |
fence_put(excl); |
fobj = rcu_dereference_protected(obj->fence, 1); |
if (fobj) { |
for (i = 0; i < fobj->shared_count; ++i) |
fence_put(rcu_dereference_protected(fobj->shared[i], 1)); |
kfree(fobj); |
} |
kfree(obj->staged); |
ww_mutex_destroy(&obj->lock); |
} |
static inline struct reservation_object_list * |
reservation_object_get_list(struct reservation_object *obj) |
{ |
return rcu_dereference_protected(obj->fence, |
reservation_object_held(obj)); |
} |
static inline struct fence * |
reservation_object_get_excl(struct reservation_object *obj) |
{ |
return rcu_dereference_protected(obj->fence_excl, |
reservation_object_held(obj)); |
} |
int reservation_object_reserve_shared(struct reservation_object *obj); |
void reservation_object_add_shared_fence(struct reservation_object *obj, |
struct fence *fence); |
void reservation_object_add_excl_fence(struct reservation_object *obj, |
struct fence *fence); |
int reservation_object_get_fences_rcu(struct reservation_object *obj, |
struct fence **pfence_excl, |
unsigned *pshared_count, |
struct fence ***pshared); |
long reservation_object_wait_timeout_rcu(struct reservation_object *obj, |
bool wait_all, bool intr, |
unsigned long timeout); |
bool reservation_object_test_signaled_rcu(struct reservation_object *obj, |
bool test_all); |
#endif /* _LINUX_RESERVATION_H */ |
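
A short usage sketch for the API above; `struct my_buffer` is hypothetical, and reservation_object_init()/_fini() are assumed to bracket the object's lifetime elsewhere. Holding the ww_mutex satisfies reservation_object_held(), so the locked accessors may be used.

```c
#include <linux/fence.h>
#include <linux/reservation.h>

struct my_buffer {
	struct reservation_object resv;
};

static struct fence *my_buffer_peek_excl(struct my_buffer *buf)
{
	struct fence *excl;

	ww_mutex_lock(&buf->resv.lock, NULL);	/* simple case: no ww ctx */
	excl = reservation_object_get_excl(&buf->resv);
	if (excl)
		fence_get(excl);	/* pin it before dropping the lock */
	ww_mutex_unlock(&buf->resv.lock);

	return excl;			/* caller must fence_put() it */
}
```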
/drivers/include/linux/scatterlist.h |
---|
101,6 → 101,22 |
return (struct page *)((sg)->page_link & ~0x3); |
} |
/** |
* sg_set_buf - Set sg entry to point at given data |
* @sg: SG entry |
* @buf: Data |
* @buflen: Data length |
* |
**/ |
//static inline void sg_set_buf(struct scatterlist *sg, const void *buf, |
// unsigned int buflen) |
//{ |
//#ifdef CONFIG_DEBUG_SG |
// BUG_ON(!virt_addr_valid(buf)); |
//#endif |
// sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); |
//} |
/* |
* Loop over each sg element, following the pointer to a new list if necessary |
*/ |
120,7 → 136,7 |
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, |
struct scatterlist *sgl) |
{ |
#ifndef ARCH_HAS_SG_CHAIN |
#ifndef CONFIG_ARCH_HAS_SG_CHAIN |
BUG(); |
#endif |
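
For reference, a sketch of what the commented-out sg_set_buf() above would accomplish, assuming sg_init_table()/sg_set_page() are available in this port as in mainline; `buf0`/`buf1` are hypothetical lowmem buffers.

```c
#include <linux/scatterlist.h>

static void my_fill_sg(struct scatterlist *sg, void *buf0, unsigned int len0,
		       void *buf1, unsigned int len1)
{
	sg_init_table(sg, 2);		/* zero entries, mark the last one */
	sg_set_page(&sg[0], virt_to_page(buf0), len0, offset_in_page(buf0));
	sg_set_page(&sg[1], virt_to_page(buf1), len1, offset_in_page(buf1));
}
```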
/drivers/include/linux/sched.h |
---|
7,5 → 7,6 |
#define TASK_COMM_LEN 16 |
#define schedule_timeout(x) delay(x) |
#define MAX_SCHEDULE_TIMEOUT LONG_MAX |
#endif |
/drivers/include/linux/seq_file.h |
---|
4,5 → 4,6 |
#include <errno.h> |
#endif |
/drivers/include/linux/seqlock.h |
---|
0,0 → 1,478 |
#ifndef __LINUX_SEQLOCK_H |
#define __LINUX_SEQLOCK_H |
/* |
* Reader/writer consistent mechanism without starving writers. This type of |
 * lock is for data where the reader wants a consistent set of information |
* and is willing to retry if the information changes. There are two types |
* of readers: |
* 1. Sequence readers which never block a writer but they may have to retry |
* if a writer is in progress by detecting change in sequence number. |
* Writers do not wait for a sequence reader. |
* 2. Locking readers which will wait if a writer or another locking reader |
* is in progress. A locking reader in progress will also block a writer |
* from going forward. Unlike the regular rwlock, the read lock here is |
* exclusive so that only one locking reader can get it. |
* |
* This is not as cache friendly as brlock. Also, this may not work well |
* for data that contains pointers, because any writer could |
* invalidate a pointer that a reader was following. |
* |
* Expected non-blocking reader usage: |
* do { |
* seq = read_seqbegin(&foo); |
* ... |
* } while (read_seqretry(&foo, seq)); |
* |
* |
* On non-SMP the spin locks disappear but the writer still needs |
* to increment the sequence variables because an interrupt routine could |
* change the state of the data. |
* |
* Based on x86_64 vsyscall gettimeofday |
* by Keith Owens and Andrea Arcangeli |
*/ |
#include <linux/spinlock.h> |
//#include <linux/preempt.h> |
#include <linux/lockdep.h> |
#include <asm/processor.h> |
/* |
* Version using sequence counter only. |
* This can be used when code has its own mutex protecting the |
* update, starting before the write_seqcount_begin() and ending |
* after the write_seqcount_end(). |
*/ |
typedef struct seqcount { |
unsigned sequence; |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
struct lockdep_map dep_map; |
#endif |
} seqcount_t; |
static inline void __seqcount_init(seqcount_t *s, const char *name, |
struct lock_class_key *key) |
{ |
/* |
* Make sure we are not reinitializing a held lock: |
*/ |
lockdep_init_map(&s->dep_map, name, key, 0); |
s->sequence = 0; |
} |
#ifdef CONFIG_DEBUG_LOCK_ALLOC |
# define SEQCOUNT_DEP_MAP_INIT(lockname) \ |
.dep_map = { .name = #lockname } |
# define seqcount_init(s) \ |
do { \ |
static struct lock_class_key __key; \ |
__seqcount_init((s), #s, &__key); \ |
} while (0) |
static inline void seqcount_lockdep_reader_access(const seqcount_t *s) |
{ |
seqcount_t *l = (seqcount_t *)s; |
unsigned long flags; |
local_irq_save(flags); |
seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_); |
seqcount_release(&l->dep_map, 1, _RET_IP_); |
local_irq_restore(flags); |
} |
#else |
# define SEQCOUNT_DEP_MAP_INIT(lockname) |
# define seqcount_init(s) __seqcount_init(s, NULL, NULL) |
# define seqcount_lockdep_reader_access(x) |
#endif |
#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)} |
/** |
* __read_seqcount_begin - begin a seq-read critical section (without barrier) |
* @s: pointer to seqcount_t |
* Returns: count to be passed to read_seqcount_retry |
* |
* __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() |
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is |
* provided before actually loading any of the variables that are to be |
* protected in this critical section. |
* |
* Use carefully, only in critical code, and comment how the barrier is |
* provided. |
*/ |
static inline unsigned __read_seqcount_begin(const seqcount_t *s) |
{ |
unsigned ret; |
repeat: |
ret = ACCESS_ONCE(s->sequence); |
if (unlikely(ret & 1)) { |
cpu_relax(); |
goto repeat; |
} |
return ret; |
} |
/** |
* raw_read_seqcount - Read the raw seqcount |
* @s: pointer to seqcount_t |
* Returns: count to be passed to read_seqcount_retry |
* |
* raw_read_seqcount opens a read critical section of the given |
* seqcount without any lockdep checking and without checking or |
* masking the LSB. Calling code is responsible for handling that. |
*/ |
static inline unsigned raw_read_seqcount(const seqcount_t *s) |
{ |
unsigned ret = ACCESS_ONCE(s->sequence); |
smp_rmb(); |
return ret; |
} |
/** |
* raw_read_seqcount_begin - start seq-read critical section w/o lockdep |
* @s: pointer to seqcount_t |
* Returns: count to be passed to read_seqcount_retry |
* |
* raw_read_seqcount_begin opens a read critical section of the given |
* seqcount, but without any lockdep checking. Validity of the critical |
* section is tested by checking read_seqcount_retry function. |
*/ |
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s) |
{ |
unsigned ret = __read_seqcount_begin(s); |
smp_rmb(); |
return ret; |
} |
/** |
* read_seqcount_begin - begin a seq-read critical section |
* @s: pointer to seqcount_t |
* Returns: count to be passed to read_seqcount_retry |
* |
* read_seqcount_begin opens a read critical section of the given seqcount. |
* Validity of the critical section is tested by checking read_seqcount_retry |
* function. |
*/ |
static inline unsigned read_seqcount_begin(const seqcount_t *s) |
{ |
seqcount_lockdep_reader_access(s); |
return raw_read_seqcount_begin(s); |
} |
/** |
* raw_seqcount_begin - begin a seq-read critical section |
* @s: pointer to seqcount_t |
* Returns: count to be passed to read_seqcount_retry |
* |
* raw_seqcount_begin opens a read critical section of the given seqcount. |
* Validity of the critical section is tested by checking read_seqcount_retry |
* function. |
* |
* Unlike read_seqcount_begin(), this function will not wait for the count |
* to stabilize. If a writer is active when we begin, we will fail the |
* read_seqcount_retry() instead of stabilizing at the beginning of the |
* critical section. |
*/ |
static inline unsigned raw_seqcount_begin(const seqcount_t *s) |
{ |
unsigned ret = ACCESS_ONCE(s->sequence); |
smp_rmb(); |
return ret & ~1; |
} |
/** |
* __read_seqcount_retry - end a seq-read critical section (without barrier) |
* @s: pointer to seqcount_t |
* @start: count, from read_seqcount_begin |
* Returns: 1 if retry is required, else 0 |
* |
* __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() |
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is |
* provided before actually loading any of the variables that are to be |
* protected in this critical section. |
* |
* Use carefully, only in critical code, and comment how the barrier is |
* provided. |
*/ |
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) |
{ |
return unlikely(s->sequence != start); |
} |
/** |
* read_seqcount_retry - end a seq-read critical section |
* @s: pointer to seqcount_t |
* @start: count, from read_seqcount_begin |
* Returns: 1 if retry is required, else 0 |
* |
* read_seqcount_retry closes a read critical section of the given seqcount. |
* If the critical section was invalid, it must be ignored (and typically |
* retried). |
*/ |
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) |
{ |
smp_rmb(); |
return __read_seqcount_retry(s, start); |
} |
static inline void raw_write_seqcount_begin(seqcount_t *s) |
{ |
s->sequence++; |
smp_wmb(); |
} |
static inline void raw_write_seqcount_end(seqcount_t *s) |
{ |
smp_wmb(); |
s->sequence++; |
} |
/* |
* raw_write_seqcount_latch - redirect readers to even/odd copy |
* @s: pointer to seqcount_t |
*/ |
static inline void raw_write_seqcount_latch(seqcount_t *s) |
{ |
smp_wmb(); /* prior stores before incrementing "sequence" */ |
s->sequence++; |
smp_wmb(); /* increment "sequence" before following stores */ |
} |
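/* |
 * Sketch (not in this header) of the even/odd latch pattern this |
 * primitive enables; "struct data" and the helpers are hypothetical: |
 * |
 *	struct latch { struct data copy[2]; seqcount_t seq; }; |
 * |
 *	void latch_write(struct latch *l, struct data d) |
 *	{ |
 *		raw_write_seqcount_latch(&l->seq); // seq odd: readers use copy[1] |
 *		l->copy[0] = d; |
 *		raw_write_seqcount_latch(&l->seq); // seq even: readers use copy[0] |
 *		l->copy[1] = d; |
 *	} |
 * |
 *	struct data latch_read(struct latch *l) |
 *	{ |
 *		unsigned seq; |
 *		struct data d; |
 *		do { |
 *			seq = raw_read_seqcount(&l->seq); |
 *			d = l->copy[seq & 1]; |
 *		} while (read_seqcount_retry(&l->seq, seq)); |
 *		return d; |
 *	} |
 */ |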
/* |
* Sequence counter only version assumes that callers are using their |
* own mutexing. |
*/ |
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) |
{ |
raw_write_seqcount_begin(s); |
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); |
} |
static inline void write_seqcount_begin(seqcount_t *s) |
{ |
write_seqcount_begin_nested(s, 0); |
} |
static inline void write_seqcount_end(seqcount_t *s) |
{ |
seqcount_release(&s->dep_map, 1, _RET_IP_); |
raw_write_seqcount_end(s); |
} |
/** |
* write_seqcount_barrier - invalidate in-progress read-side seq operations |
* @s: pointer to seqcount_t |
* |
* After write_seqcount_barrier, no read-side seq operations will complete |
* successfully and see data older than this. |
*/ |
static inline void write_seqcount_barrier(seqcount_t *s) |
{ |
smp_wmb(); |
s->sequence += 2; |
} |
typedef struct { |
struct seqcount seqcount; |
spinlock_t lock; |
} seqlock_t; |
/* |
* These macros triggered gcc-3.x compile-time problems. We think these are |
* OK now. Be cautious. |
*/ |
#define __SEQLOCK_UNLOCKED(lockname) \ |
{ \ |
.seqcount = SEQCNT_ZERO(lockname), \ |
.lock = __SPIN_LOCK_UNLOCKED(lockname) \ |
} |
#define seqlock_init(x) \ |
do { \ |
seqcount_init(&(x)->seqcount); \ |
spin_lock_init(&(x)->lock); \ |
} while (0) |
#define DEFINE_SEQLOCK(x) \ |
seqlock_t x = __SEQLOCK_UNLOCKED(x) |
/* |
* Read side functions for starting and finalizing a read side section. |
*/ |
static inline unsigned read_seqbegin(const seqlock_t *sl) |
{ |
return read_seqcount_begin(&sl->seqcount); |
} |
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) |
{ |
return read_seqcount_retry(&sl->seqcount, start); |
} |
/* |
* Lock out other writers and update the count. |
* Acts like a normal spin_lock/unlock. |
* Don't need preempt_disable() because that is in the spin_lock already. |
*/ |
static inline void write_seqlock(seqlock_t *sl) |
{ |
spin_lock(&sl->lock); |
write_seqcount_begin(&sl->seqcount); |
} |
static inline void write_sequnlock(seqlock_t *sl) |
{ |
write_seqcount_end(&sl->seqcount); |
spin_unlock(&sl->lock); |
} |
static inline void write_seqlock_bh(seqlock_t *sl) |
{ |
spin_lock_bh(&sl->lock); |
write_seqcount_begin(&sl->seqcount); |
} |
static inline void write_sequnlock_bh(seqlock_t *sl) |
{ |
write_seqcount_end(&sl->seqcount); |
spin_unlock_bh(&sl->lock); |
} |
static inline void write_seqlock_irq(seqlock_t *sl) |
{ |
spin_lock_irq(&sl->lock); |
write_seqcount_begin(&sl->seqcount); |
} |
static inline void write_sequnlock_irq(seqlock_t *sl) |
{ |
write_seqcount_end(&sl->seqcount); |
spin_unlock_irq(&sl->lock); |
} |
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) |
{ |
unsigned long flags; |
spin_lock_irqsave(&sl->lock, flags); |
write_seqcount_begin(&sl->seqcount); |
return flags; |
} |
#define write_seqlock_irqsave(lock, flags) \ |
do { flags = __write_seqlock_irqsave(lock); } while (0) |
static inline void |
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) |
{ |
write_seqcount_end(&sl->seqcount); |
spin_unlock_irqrestore(&sl->lock, flags); |
} |
/* |
* A locking reader exclusively locks out other writers and locking readers, |
* but doesn't update the sequence number. Acts like a normal spin_lock/unlock. |
* Don't need preempt_disable() because that is in the spin_lock already. |
*/ |
static inline void read_seqlock_excl(seqlock_t *sl) |
{ |
spin_lock(&sl->lock); |
} |
static inline void read_sequnlock_excl(seqlock_t *sl) |
{ |
spin_unlock(&sl->lock); |
} |
/** |
* read_seqbegin_or_lock - begin a sequence number check or locking block |
* @lock: sequence lock |
* @seq : sequence number to be checked |
* |
* First try it once optimistically without taking the lock. If that fails, |
* take the lock. The sequence number is also used as a marker for deciding |
* whether to be a reader (even) or writer (odd). |
* N.B. seq must be initialized to an even number to begin with. |
*/ |
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq) |
{ |
if (!(*seq & 1)) /* Even */ |
*seq = read_seqbegin(lock); |
else /* Odd */ |
read_seqlock_excl(lock); |
} |
static inline int need_seqretry(seqlock_t *lock, int seq) |
{ |
return !(seq & 1) && read_seqretry(lock, seq); |
} |
static inline void done_seqretry(seqlock_t *lock, int seq) |
{ |
if (seq & 1) |
read_sequnlock_excl(lock); |
} |
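/* |
 * Sketch of the intended calling pattern for the three helpers above |
 * (mirroring mainline callers): the first pass is lockless; on failure, |
 * seq is forced odd so the retry takes the lock: |
 * |
 *	int seq = 0; |
 * retry: |
 *	read_seqbegin_or_lock(&lock, &seq); |
 *	...read the protected state... |
 *	if (need_seqretry(&lock, seq)) { |
 *		seq = 1;	// switch to the locking path |
 *		goto retry; |
 *	} |
 *	done_seqretry(&lock, seq); |
 */ |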
static inline void read_seqlock_excl_bh(seqlock_t *sl) |
{ |
spin_lock_bh(&sl->lock); |
} |
static inline void read_sequnlock_excl_bh(seqlock_t *sl) |
{ |
spin_unlock_bh(&sl->lock); |
} |
static inline void read_seqlock_excl_irq(seqlock_t *sl) |
{ |
spin_lock_irq(&sl->lock); |
} |
static inline void read_sequnlock_excl_irq(seqlock_t *sl) |
{ |
spin_unlock_irq(&sl->lock); |
} |
static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl) |
{ |
unsigned long flags; |
spin_lock_irqsave(&sl->lock, flags); |
return flags; |
} |
#define read_seqlock_excl_irqsave(lock, flags) \ |
do { flags = __read_seqlock_excl_irqsave(lock); } while (0) |
static inline void |
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags) |
{ |
spin_unlock_irqrestore(&sl->lock, flags); |
} |
static inline unsigned long |
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) |
{ |
unsigned long flags = 0; |
if (!(*seq & 1)) /* Even */ |
*seq = read_seqbegin(lock); |
else /* Odd */ |
read_seqlock_excl_irqsave(lock, flags); |
return flags; |
} |
static inline void |
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags) |
{ |
if (seq & 1) |
read_sequnlock_excl_irqrestore(lock, flags); |
} |
#endif /* __LINUX_SEQLOCK_H */ |
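For reference, a minimal usage sketch of the seqlock API defined above; the hypothetical 64-bit counter is the classic case, since 32-bit readers cannot load a u64 atomically: |
static DEFINE_SEQLOCK(counter_lock); |
static u64 counter;			/* hypothetical shared state */ |
static void counter_set(u64 v)		/* writer, serialized by the lock */ |
{ |
	write_seqlock(&counter_lock); |
	counter = v; |
	write_sequnlock(&counter_lock); |
} |
static u64 counter_get(void)		/* lockless reader, retries on race */ |
{ |
	unsigned seq; |
	u64 v; |
	do { |
		seq = read_seqbegin(&counter_lock); |
		v = counter; |
	} while (read_seqretry(&counter_lock, seq)); |
	return v; |
} |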
/drivers/include/linux/shmem_fs.h |
---|
1,8 → 1,9 |
#ifndef __SHMEM_FS_H |
#define __SHMEM_FS_H |
#include <kernel.h> |
#include <linux/file.h> |
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); |
struct page *shmem_read_mapping_page_gfp(struct file *filep, |
pgoff_t index, gfp_t gfp); |
/drivers/include/linux/slab.h |
---|
11,6 → 11,140 |
#ifndef _LINUX_SLAB_H |
#define _LINUX_SLAB_H |
#include <errno.h> |
// stub |
#include <linux/gfp.h> |
#include <linux/types.h> |
#include <linux/workqueue.h> |
/* |
* Flags to pass to kmem_cache_create(). |
* The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set. |
*/ |
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ |
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ |
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ |
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ |
#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ |
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ |
#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ |
/* |
* SLAB_DESTROY_BY_RCU - **WARNING** READ THIS! |
* |
* This delays freeing the SLAB page by a grace period; it does _NOT_ |
* delay object freeing. This means that if you do kmem_cache_free() |
* that memory location is free to be reused at any time. Thus it may |
* be possible to see another object there in the same RCU grace period. |
* |
* This feature only ensures the memory location backing the object |
* stays valid, the trick to using this is relying on an independent |
* object validation pass. Something like: |
* |
* rcu_read_lock() |
* again: |
* obj = lockless_lookup(key); |
* if (obj) { |
* if (!try_get_ref(obj)) // might fail for free objects |
* goto again; |
* |
* if (obj->key != key) { // not the object we expected |
* put_ref(obj); |
* goto again; |
* } |
* } |
* rcu_read_unlock(); |
* |
* This is useful if we need to approach a kernel structure obliquely, |
* from its address obtained without the usual locking. We can lock |
* the structure to stabilize it and check it's still at the given address, |
* only if we can be sure that the memory has not been meanwhile reused |
* for some other kind of object (which our subsystem's lock might corrupt). |
* |
* rcu_read_lock before reading the address, then rcu_read_unlock after |
* taking the spinlock within the structure expected at that address. |
*/ |
#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ |
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ |
#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ |
/* Flag to prevent checks on free */ |
#ifdef CONFIG_DEBUG_OBJECTS |
# define SLAB_DEBUG_OBJECTS 0x00400000UL |
#else |
# define SLAB_DEBUG_OBJECTS 0x00000000UL |
#endif |
#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ |
/* Don't track use of uninitialized memory */ |
#ifdef CONFIG_KMEMCHECK |
# define SLAB_NOTRACK 0x01000000UL |
#else |
# define SLAB_NOTRACK 0x00000000UL |
#endif |
#ifdef CONFIG_FAILSLAB |
# define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */ |
#else |
# define SLAB_FAILSLAB 0x00000000UL |
#endif |
/* The following flags affect the page allocator grouping pages by mobility */ |
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ |
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ |
/* |
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. |
* |
* Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault. |
* |
* ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. |
* Both make kfree a no-op. |
*/ |
#define ZERO_SIZE_PTR ((void *)16) |
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ |
(unsigned long)ZERO_SIZE_PTR) |
void __init kmem_cache_init(void); |
int slab_is_available(void); |
void kmem_cache_destroy(struct kmem_cache *); |
int kmem_cache_shrink(struct kmem_cache *); |
void kmem_cache_free(struct kmem_cache *, void *); |
static inline void *krealloc(void *p, size_t new_size, gfp_t flags) |
{ |
return __builtin_realloc(p, new_size); |
} |
static inline void kfree(void *p) |
{ |
__builtin_free(p); |
} |
static __always_inline void *kmalloc(size_t size, gfp_t flags) |
{ |
return __builtin_malloc(size); |
} |
/** |
* kzalloc - allocate memory. The memory is set to zero. |
* @size: how many bytes of memory are required. |
* @flags: the type of memory to allocate (see kmalloc). |
*/ |
static inline void *kzalloc(size_t size, gfp_t flags) |
{ |
	void *ret = __builtin_malloc(size); |
	if (ret)		/* don't memset a failed allocation */ |
		memset(ret, 0, size); |
	return ret; |
} |
static inline void *kcalloc(size_t n, size_t size, uint32_t flags) |
{ |
	if (size != 0 && n > SIZE_MAX / size)	/* overflow check */ |
		return NULL; |
	return kzalloc(n * size, flags); |
} |
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) |
{ |
	if (size != 0 && n > SIZE_MAX / size)	/* overflow check */ |
		return NULL; |
	return kmalloc(n * size, flags); |
} |
#endif /* _LINUX_SLAB_H */ |
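The SLAB_DESTROY_BY_RCU lookup pattern documented above, fleshed out as a minimal sketch; struct obj, obj_table_lookup() and obj_release() are hypothetical, and mainline's kref_get_unless_zero() stands in for try_get_ref(): |
struct obj { |
	struct kref ref;	/* refcount embedded in the object */ |
	unsigned long key; |
}; |
static void obj_release(struct kref *ref);			/* hypothetical */ |
static struct obj *obj_table_lookup(unsigned long key);	/* hypothetical, lockless */ |
static struct obj *obj_lookup(unsigned long key) |
{ |
	struct obj *obj; |
	rcu_read_lock(); |
again: |
	obj = obj_table_lookup(key); |
	if (obj) { |
		/* May fail if the object was freed and its slot reused. */ |
		if (!kref_get_unless_zero(&obj->ref)) |
			goto again; |
		/* Slot can now hold a different object: re-check the key. */ |
		if (obj->key != key) { |
			kref_put(&obj->ref, obj_release); |
			goto again; |
		} |
	} |
	rcu_read_unlock(); |
	return obj; |
} |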
/drivers/include/linux/spinlock.h |
---|
48,14 → 48,14 |
#include <linux/typecheck.h> |
//#include <linux/preempt.h> |
//#include <linux/linkage.h> |
#include <linux/linkage.h> |
#include <linux/compiler.h> |
//#include <linux/thread_info.h> |
#include <linux/kernel.h> |
#include <linux/stringify.h> |
//#include <linux/bottom_half.h> |
#include <asm/barrier.h> |
//#include <asm/system.h> |
/* |
* Must define these before including other files, inline functions need them |
/drivers/include/linux/stddef.h |
---|
1,8 → 1,9 |
#ifndef _LINUX_STDDEF_H |
#define _LINUX_STDDEF_H |
#include <linux/compiler.h> |
#include <uapi/linux/stddef.h> |
#undef NULL |
#define NULL ((void *)0) |
/drivers/include/linux/string.h |
---|
6,6 → 6,7 |
#include <linux/types.h> /* for size_t */ |
#include <linux/stddef.h> /* for NULL */ |
#include <stdarg.h> |
#include <uapi/linux/string.h> |
extern char *strndup_user(const char __user *, long); |
extern void *memdup_user(const void __user *, size_t); |
40,7 → 41,7 |
extern int strncmp(const char *,const char *,__kernel_size_t); |
#endif |
#ifndef __HAVE_ARCH_STRNICMP |
extern int strnicmp(const char *, const char *, __kernel_size_t); |
#define strnicmp strncasecmp |
#endif |
#ifndef __HAVE_ARCH_STRCASECMP |
extern int strcasecmp(const char *s1, const char *s2); |
143,7 → 144,8 |
return strncmp(str, prefix, strlen(prefix)) == 0; |
} |
extern size_t memweight(const void *ptr, size_t bytes); |
size_t memweight(const void *ptr, size_t bytes); |
void memzero_explicit(void *s, size_t count); |
/** |
* kbasename - return the last part of a pathname. |
/drivers/include/linux/threads.h |
---|
0,0 → 1,45 |
#ifndef _LINUX_THREADS_H |
#define _LINUX_THREADS_H |
/* |
* The default limit for the nr of threads is now in |
* /proc/sys/kernel/threads-max. |
*/ |
/* |
* Maximum supported processors. Setting this smaller saves quite a |
* bit of memory. Use nr_cpu_ids instead of this except for static bitmaps. |
*/ |
#ifndef CONFIG_NR_CPUS |
/* FIXME: This should be fixed in the arch's Kconfig */ |
#define CONFIG_NR_CPUS 1 |
#endif |
/* Places which use this should consider cpumask_var_t. */ |
#define NR_CPUS CONFIG_NR_CPUS |
#define MIN_THREADS_LEFT_FOR_ROOT 4 |
/* |
* This controls the default maximum pid allocated to a process |
*/ |
#define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000) |
/* |
* A maximum of 4 million PIDs should be enough for a while. |
* [NOTE: PID/TIDs are limited to 2^29 ~= 500+ million, see futex.h.] |
*/ |
#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \ |
(sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT)) |
/* |
* Define a minimum number of pids per cpu. Heuristically based |
* on original pid max of 32k for 32 cpus. Also, increase the |
* minimum settable value for pid_max on the running system based |
* on similar defaults. See kernel/pid.c:pidmap_init() for details. |
*/ |
#define PIDS_PER_CPU_DEFAULT 1024 |
#define PIDS_PER_CPU_MIN 8 |
#endif |
/drivers/include/linux/time.h |
---|
1,22 → 1,13 |
#ifndef _LINUX_TIME_H |
#define _LINUX_TIME_H |
//# include <linux/cache.h> |
//# include <linux/seqlock.h> |
# include <linux/cache.h> |
# include <linux/seqlock.h> |
# include <linux/math64.h> |
//#include <uapi/linux/time.h> |
# include <linux/time64.h> |
extern struct timezone sys_tz; |
/* Parameters used to convert the timespec values: */ |
#define MSEC_PER_SEC 1000L |
#define USEC_PER_MSEC 1000L |
#define NSEC_PER_USEC 1000L |
#define NSEC_PER_MSEC 1000000L |
#define USEC_PER_SEC 1000000L |
#define NSEC_PER_SEC 1000000000L |
#define FSEC_PER_SEC 1000000000000000LL |
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) |
static inline int timespec_equal(const struct timespec *a, |
48,10 → 39,21 |
return lhs->tv_usec - rhs->tv_usec; |
} |
extern unsigned long mktime(const unsigned int year, const unsigned int mon, |
extern time64_t mktime64(const unsigned int year, const unsigned int mon, |
const unsigned int day, const unsigned int hour, |
const unsigned int min, const unsigned int sec); |
/** |
* Deprecated. Use mktime64(). |
*/ |
static inline unsigned long mktime(const unsigned int year, |
const unsigned int mon, const unsigned int day, |
const unsigned int hour, const unsigned int min, |
const unsigned int sec) |
{ |
return mktime64(year, mon, day, hour, min, sec); |
} |
extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); |
/* |
84,13 → 86,6 |
return ts_delta; |
} |
#define KTIME_MAX ((s64)~((u64)1 << 63)) |
#if (BITS_PER_LONG == 64) |
# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) |
#else |
# define KTIME_SEC_MAX LONG_MAX |
#endif |
/* |
* Returns true if the timespec is norm, false if denorm: |
*/ |
115,28 → 110,8 |
return true; |
} |
extern bool persistent_clock_exist; |
extern struct timespec timespec_trunc(struct timespec t, unsigned gran); |
static inline bool has_persistent_clock(void) |
{ |
return persistent_clock_exist; |
} |
extern void read_persistent_clock(struct timespec *ts); |
extern void read_boot_clock(struct timespec *ts); |
extern int persistent_clock_is_local; |
extern int update_persistent_clock(struct timespec now); |
void timekeeping_init(void); |
extern int timekeeping_suspended; |
unsigned long get_seconds(void); |
struct timespec current_kernel_time(void); |
struct timespec __current_kernel_time(void); /* does not take xtime_lock */ |
struct timespec get_monotonic_coarse(void); |
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, |
struct timespec *wtom, struct timespec *sleep); |
void timekeeping_inject_sleeptime(struct timespec *delta); |
#define CURRENT_TIME (current_kernel_time()) |
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) |
153,34 → 128,15 |
extern u32 (*arch_gettimeoffset)(void); |
#endif |
extern void do_gettimeofday(struct timeval *tv); |
extern int do_settimeofday(const struct timespec *tv); |
extern int do_sys_settimeofday(const struct timespec *tv, |
const struct timezone *tz); |
#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) |
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); |
struct itimerval; |
extern int do_setitimer(int which, struct itimerval *value, |
struct itimerval *ovalue); |
extern unsigned int alarm_setitimer(unsigned int seconds); |
extern int do_getitimer(int which, struct itimerval *value); |
extern int __getnstimeofday(struct timespec *tv); |
extern void getnstimeofday(struct timespec *tv); |
extern void getrawmonotonic(struct timespec *ts); |
extern void getnstime_raw_and_real(struct timespec *ts_raw, |
struct timespec *ts_real); |
extern void getboottime(struct timespec *ts); |
extern void monotonic_to_bootbased(struct timespec *ts); |
extern void get_monotonic_boottime(struct timespec *ts); |
extern struct timespec timespec_trunc(struct timespec t, unsigned gran); |
extern int timekeeping_valid_for_hres(void); |
extern u64 timekeeping_max_deferment(void); |
extern int timekeeping_inject_offset(struct timespec *ts); |
extern s32 timekeeping_get_tai_offset(void); |
extern void timekeeping_set_tai_offset(s32 tai_offset); |
extern void timekeeping_clocktai(struct timespec *ts); |
extern unsigned int alarm_setitimer(unsigned int seconds); |
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); |
struct tms; |
extern void do_sys_times(struct tms *); |
/drivers/include/linux/time64.h |
---|
0,0 → 1,190 |
#ifndef _LINUX_TIME64_H |
#define _LINUX_TIME64_H |
#include <uapi/linux/time.h> |
typedef __s64 time64_t; |
/* |
* This wants to go into uapi/linux/time.h once we agreed about the |
* userspace interfaces. |
*/ |
#if __BITS_PER_LONG == 64 |
# define timespec64 timespec |
#else |
struct timespec64 { |
time64_t tv_sec; /* seconds */ |
long tv_nsec; /* nanoseconds */ |
}; |
#endif |
/* Parameters used to convert the timespec values: */ |
#define MSEC_PER_SEC 1000L |
#define USEC_PER_MSEC 1000L |
#define NSEC_PER_USEC 1000L |
#define NSEC_PER_MSEC 1000000L |
#define USEC_PER_SEC 1000000L |
#define NSEC_PER_SEC 1000000000L |
#define FSEC_PER_SEC 1000000000000000LL |
/* Located here for timespec[64]_valid_strict */ |
#define KTIME_MAX ((s64)~((u64)1 << 63)) |
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) |
#if __BITS_PER_LONG == 64 |
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) |
{ |
return ts64; |
} |
static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) |
{ |
return ts; |
} |
# define timespec64_equal timespec_equal |
# define timespec64_compare timespec_compare |
# define set_normalized_timespec64 set_normalized_timespec |
# define timespec64_add_safe timespec_add_safe |
# define timespec64_add timespec_add |
# define timespec64_sub timespec_sub |
# define timespec64_valid timespec_valid |
# define timespec64_valid_strict timespec_valid_strict |
# define timespec64_to_ns timespec_to_ns |
# define ns_to_timespec64 ns_to_timespec |
# define timespec64_add_ns timespec_add_ns |
#else |
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) |
{ |
struct timespec ret; |
ret.tv_sec = (time_t)ts64.tv_sec; |
ret.tv_nsec = ts64.tv_nsec; |
return ret; |
} |
static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) |
{ |
struct timespec64 ret; |
ret.tv_sec = ts.tv_sec; |
ret.tv_nsec = ts.tv_nsec; |
return ret; |
} |
static inline int timespec64_equal(const struct timespec64 *a, |
const struct timespec64 *b) |
{ |
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); |
} |
/* |
* lhs < rhs: return <0 |
* lhs == rhs: return 0 |
* lhs > rhs: return >0 |
*/ |
static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs) |
{ |
if (lhs->tv_sec < rhs->tv_sec) |
return -1; |
if (lhs->tv_sec > rhs->tv_sec) |
return 1; |
return lhs->tv_nsec - rhs->tv_nsec; |
} |
extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec); |
/* |
* timespec64_add_safe assumes both values are positive and checks for |
* overflow. It will return TIME_T_MAX if the returned value would be |
* smaller than either of the arguments. |
*/ |
extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, |
const struct timespec64 rhs); |
static inline struct timespec64 timespec64_add(struct timespec64 lhs, |
struct timespec64 rhs) |
{ |
struct timespec64 ts_delta; |
set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec, |
lhs.tv_nsec + rhs.tv_nsec); |
return ts_delta; |
} |
/* |
* sub = lhs - rhs, in normalized form |
*/ |
static inline struct timespec64 timespec64_sub(struct timespec64 lhs, |
struct timespec64 rhs) |
{ |
struct timespec64 ts_delta; |
set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec, |
lhs.tv_nsec - rhs.tv_nsec); |
return ts_delta; |
} |
/* |
* Returns true if the timespec64 is norm, false if denorm: |
*/ |
static inline bool timespec64_valid(const struct timespec64 *ts) |
{ |
/* Dates before 1970 are bogus */ |
if (ts->tv_sec < 0) |
return false; |
/* Can't have more nanoseconds than a second */ |
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) |
return false; |
return true; |
} |
static inline bool timespec64_valid_strict(const struct timespec64 *ts) |
{ |
if (!timespec64_valid(ts)) |
return false; |
/* Disallow values that could overflow ktime_t */ |
if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) |
return false; |
return true; |
} |
/** |
* timespec64_to_ns - Convert timespec64 to nanoseconds |
* @ts: pointer to the timespec64 variable to be converted |
* |
* Returns the scalar nanosecond representation of the timespec64 |
* parameter. |
*/ |
static inline s64 timespec64_to_ns(const struct timespec64 *ts) |
{ |
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; |
} |
/** |
* ns_to_timespec64 - Convert nanoseconds to timespec64 |
* @nsec: the nanoseconds value to be converted |
* |
* Returns the timespec64 representation of the nsec parameter. |
*/ |
extern struct timespec64 ns_to_timespec64(const s64 nsec); |
/** |
* timespec64_add_ns - Adds nanoseconds to a timespec64 |
* @a: pointer to timespec64 to be incremented |
* @ns: unsigned nanoseconds value to be added |
* |
* This must always be inlined because it's used from the x86-64 vdso, |
* which cannot call other kernel functions. |
*/ |
static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns) |
{ |
a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); |
a->tv_nsec = ns; |
} |
#endif |
#endif /* _LINUX_TIME64_H */ |
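A small sketch of the arithmetic these helpers are for, measuring an interval as scalar nanoseconds (the two timestamps are assumed to come from whatever clock the caller uses): |
static s64 example_elapsed_ns(struct timespec64 start, struct timespec64 end) |
{ |
	struct timespec64 delta = timespec64_sub(end, start); |
	return timespec64_to_ns(&delta);	/* normalized nanoseconds */ |
} |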
/drivers/include/linux/types.h |
---|
1,23 → 1,14 |
#ifndef _LINUX_TYPES_H |
#define _LINUX_TYPES_H |
#include <asm/types.h> |
#define __EXPORTED_HEADERS__ |
#include <uapi/linux/types.h> |
#ifndef __ASSEMBLY__ |
#ifdef __KERNEL__ |
#define DECLARE_BITMAP(name,bits) \ |
unsigned long name[BITS_TO_LONGS(bits)] |
#else |
#ifndef __EXPORTED_HEADERS__ |
#warning "Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders" |
#endif /* __EXPORTED_HEADERS__ */ |
#endif |
#include <linux/posix_types.h> |
#ifdef __KERNEL__ |
typedef __u32 __kernel_dev_t; |
typedef __kernel_fd_set fd_set; |
158,48 → 149,12 |
typedef u32 dma_addr_t; |
#endif /* dma_addr_t */ |
#endif /* __KERNEL__ */ |
/* |
* Below are truly Linux-specific types that should never collide with |
* any application/library that wants linux/types.h. |
*/ |
#ifdef __CHECKER__ |
#define __bitwise__ __attribute__((bitwise)) |
#else |
#define __bitwise__ |
#endif |
#ifdef __CHECK_ENDIAN__ |
#define __bitwise __bitwise__ |
#else |
#define __bitwise |
#endif |
typedef __u16 __bitwise __le16; |
typedef __u16 __bitwise __be16; |
typedef __u32 __bitwise __le32; |
typedef __u32 __bitwise __be32; |
typedef __u64 __bitwise __le64; |
typedef __u64 __bitwise __be64; |
typedef __u16 __bitwise __sum16; |
typedef __u32 __bitwise __wsum; |
/* |
* aligned_u64 should be used in defining kernel<->userspace ABIs to avoid |
* common 32/64-bit compat problems. |
* 64-bit values align to 4-byte boundaries on x86_32 (and possibly other |
* architectures) and to 8-byte boundaries on 64-bit architectures. The new |
* aligned_64 type enforces 8-byte alignment so that structs containing |
* aligned_64 values have the same alignment on 32-bit and 64-bit architectures. |
* No conversions are necessary between 32-bit user-space and a 64-bit kernel. |
*/ |
#define __aligned_u64 __u64 __attribute__((aligned(8))) |
#define __aligned_be64 __be64 __attribute__((aligned(8))) |
#define __aligned_le64 __le64 __attribute__((aligned(8))) |
#ifdef __KERNEL__ |
typedef unsigned __bitwise__ gfp_t; |
typedef unsigned __bitwise__ fmode_t; |
typedef unsigned __bitwise__ oom_flags_t; |
247,111 → 202,6 |
char f_fpack[6]; |
}; |
#endif /* __KERNEL__ */ |
#endif /* __ASSEMBLY__ */ |
typedef unsigned char u8_t; |
typedef unsigned short u16_t; |
typedef unsigned long u32_t; |
typedef unsigned long long u64_t; |
typedef unsigned int addr_t; |
typedef unsigned int count_t; |
#define false 0 |
#define true 1 |
#define likely(x) __builtin_expect(!!(x), 1) |
#define unlikely(x) __builtin_expect(!!(x), 0) |
#define BITS_PER_LONG 32 |
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) |
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) |
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) |
#define MTRR_TYPE_UNCACHABLE 0 |
#define MTRR_TYPE_WRCOMB 1 |
#define MTRR_TYPE_WRTHROUGH 4 |
#define MTRR_TYPE_WRPROT 5 |
#define MTRR_TYPE_WRBACK 6 |
#define MTRR_NUM_TYPES 7 |
int dbgprintf(const char* format, ...); |
#define GFP_KERNEL 0 |
#define GFP_ATOMIC 0 |
//#include <stdio.h> |
int snprintf(char *str, size_t size, const char *format, ...); |
//#include <string.h> |
void* memcpy(void *s1, const void *s2, size_t n); |
void* memset(void *s, int c, size_t n); |
size_t strlen(const char *s); |
char *strcpy(char *s1, const char *s2); |
char *strncpy (char *dst, const char *src, size_t len); |
void *malloc(size_t size); |
void* realloc(void* oldmem, size_t bytes); |
#define kfree free |
static inline void *krealloc(void *p, size_t new_size, gfp_t flags) |
{ |
return realloc(p, new_size); |
} |
static inline void *kzalloc(size_t size, uint32_t flags) |
{ |
	void *ret = malloc(size); |
	if (ret)		/* don't memset a failed allocation */ |
		memset(ret, 0, size); |
	return ret; |
} |
#define kmalloc(s,f) kzalloc((s), (f)) |
struct drm_file; |
#define PAGE_SHIFT 12 |
#define PAGE_SIZE (1UL << PAGE_SHIFT) |
#define PAGE_MASK (~(PAGE_SIZE-1)) |
#define ENTER() dbgprintf("enter %s\n",__FUNCTION__) |
#define LEAVE() dbgprintf("leave %s\n",__FUNCTION__) |
struct timeval |
{ |
__kernel_time_t tv_sec; /* seconds */ |
__kernel_suseconds_t tv_usec; /* microseconds */ |
}; |
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 |
#ifndef __read_mostly |
#define __read_mostly |
#endif |
/** |
* struct callback_head - callback structure for use with RCU and task_work |
* @next: next update requests in a list |
363,4 → 213,5 |
}; |
#define rcu_head callback_head |
#endif /* __ASSEMBLY__ */ |
#endif /* _LINUX_TYPES_H */ |
/drivers/include/linux/uuid.h |
---|
0,0 → 1,58 |
/* |
* UUID/GUID definition |
* |
* Copyright (C) 2010, Intel Corp. |
* Huang Ying <ying.huang@intel.com> |
* |
* This program is free software; you can redistribute it and/or |
* modify it under the terms of the GNU General Public License version |
* 2 as published by the Free Software Foundation; |
* |
* This program is distributed in the hope that it will be useful, |
* but WITHOUT ANY WARRANTY; without even the implied warranty of |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
* GNU General Public License for more details. |
* |
* You should have received a copy of the GNU General Public License |
* along with this program; if not, write to the Free Software |
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
*/ |
#ifndef _UAPI_LINUX_UUID_H_ |
#define _UAPI_LINUX_UUID_H_ |
#include <linux/types.h> |
#include <linux/string.h> |
typedef struct { |
__u8 b[16]; |
} uuid_le; |
typedef struct { |
__u8 b[16]; |
} uuid_be; |
#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ |
((uuid_le) \ |
{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ |
(b) & 0xff, ((b) >> 8) & 0xff, \ |
(c) & 0xff, ((c) >> 8) & 0xff, \ |
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) |
#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ |
((uuid_be) \ |
{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \ |
((b) >> 8) & 0xff, (b) & 0xff, \ |
((c) >> 8) & 0xff, (c) & 0xff, \ |
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) |
#define NULL_UUID_LE \ |
UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \ |
0x00, 0x00, 0x00, 0x00) |
#define NULL_UUID_BE \ |
UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, \ |
0x00, 0x00, 0x00, 0x00) |
#endif /* _UAPI_LINUX_UUID_H_ */ |
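As an illustrative example (not from the header), the GUID 12345678-9abc-def0-1122-334455667788 expressed with UUID_LE; the first three fields are stored little-endian, the trailing eight bytes verbatim: |
static const uuid_le example_guid = |
	UUID_LE(0x12345678, 0x9abc, 0xdef0, |
		0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88); |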
/drivers/include/linux/vgaarb.h |
---|
0,0 → 1,249 |
/* |
* The VGA arbiter manages VGA space routing and VGA resource decode to |
* allow multiple VGA devices to be used in a system in a safe way. |
* |
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> |
* (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> |
* (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS |
* IN THE SOFTWARE. |
* |
*/ |
#ifndef LINUX_VGA_H |
#define LINUX_VGA_H |
//#include <video/vga.h> |
/* Legacy VGA regions */ |
#define VGA_RSRC_NONE 0x00 |
#define VGA_RSRC_LEGACY_IO 0x01 |
#define VGA_RSRC_LEGACY_MEM 0x02 |
#define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM) |
/* Non-legacy access */ |
#define VGA_RSRC_NORMAL_IO 0x04 |
#define VGA_RSRC_NORMAL_MEM 0x08 |
/* Passing that instead of a pci_dev to use the system "default" |
* device, that is the one used by vgacon. Archs will probably |
* have to provide their own vga_default_device(); |
*/ |
#define VGA_DEFAULT_DEVICE (NULL) |
struct pci_dev; |
/* For use by clients */ |
/** |
* vga_set_legacy_decoding |
* |
* @pdev: pci device of the VGA card |
* @decodes: bit mask of what legacy regions the card decodes |
* |
* Indicates to the arbiter if the card decodes legacy VGA IOs, |
* legacy VGA Memory, both, or none. All cards default to both; |
* the card driver (fbdev for example) should tell the arbiter |
* if it has disabled legacy decoding, so the card can be left |
* out of the arbitration process (and can be safe to take |
* interrupts at any time). |
*/ |
extern void vga_set_legacy_decoding(struct pci_dev *pdev, |
unsigned int decodes); |
/** |
* vga_get - acquire & lock VGA resources |
* |
* @pdev: pci device of the VGA card or NULL for the system default |
* @rsrc: bit mask of resources to acquire and lock |
* @interruptible: blocking should be interruptible by signals? |
* |
* This function acquires VGA resources for the given |
* card and marks those resources locked. If the resources requested |
* are "normal" (and not legacy) resources, the arbiter will first check |
* whether the card is doing legacy decoding for that type of resource. If |
* yes, the lock is "converted" into a legacy resource lock. |
* The arbiter will first look for all VGA cards that might conflict |
* and disable their IOs and/or Memory access, including VGA forwarding |
* on P2P bridges if necessary, so that the requested resources can |
* be used. Then, the card is marked as locking these resources and |
* the IO and/or Memory accesses are enabled on the card (including |
* VGA forwarding on parent P2P bridges if any). |
* This function will block if some conflicting card is already locking |
* one of the required resources (or any resource on a different bus |
* segment, since P2P bridges don't differentiate VGA memory and IO |
* afaik). You can indicate whether this blocking should be interruptible |
* by a signal (for userland interface) or not. |
* Must not be called at interrupt time or in atomic context. |
* If the card already owns the resources, the function succeeds. |
* Nested calls are supported (a per-resource counter is maintained) |
*/ |
#if defined(CONFIG_VGA_ARB) |
extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible); |
#else |
static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; } |
#endif |
/** |
* vga_get_interruptible |
* |
* Shortcut to vga_get |
*/ |
static inline int vga_get_interruptible(struct pci_dev *pdev, |
unsigned int rsrc) |
{ |
return vga_get(pdev, rsrc, 1); |
} |
/** |
* vga_get_uninterruptible |
* |
* Shortcut to vga_get |
*/ |
static inline int vga_get_uninterruptible(struct pci_dev *pdev, |
unsigned int rsrc) |
{ |
return vga_get(pdev, rsrc, 0); |
} |
/** |
* vga_tryget - try to acquire & lock legacy VGA resources |
* |
* @pdev: pci device of VGA card or NULL for system default |
* @rsrc: bit mask of resources to acquire and lock |
* |
* This function performs the same operation as vga_get(), but |
* will return an error (-EBUSY) instead of blocking if the resources |
* are already locked by another card. It can be called in any context |
*/ |
#if defined(CONFIG_VGA_ARB) |
extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc); |
#else |
static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; } |
#endif |
/** |
* vga_put - release lock on legacy VGA resources |
* |
* @pdev: pci device of VGA card or NULL for system default |
* @rsrc: bit mask of resources to release |
* |
* This function releases resources previously locked by vga_get() |
* or vga_tryget(). The resources aren't disabled right away, so |
* that a subsequent vga_get() on the same card will succeed |
* immediately. Resources have a counter, so locks are only |
* released if the counter reaches 0. |
*/ |
#if defined(CONFIG_VGA_ARB) |
extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); |
#else |
#define vga_put(pdev, rsrc) |
#endif |
/** |
* vga_default_device |
* |
* This can be defined by the platform. The default implementation |
* is rather dumb and will probably only work properly on single |
* vga card setups and/or x86 platforms. |
* |
* If your VGA default device is not PCI, you'll have to return |
* NULL here. In this case, I assume it will not conflict with |
* any PCI card. If this is not true, I'll have to define two archs |
* hooks for enabling/disabling the VGA default device if that is |
* possible. This may be a problem with real _ISA_ VGA cards, in |
* addition to a PCI one. I don't know at this point how to deal |
* with that card. Can theirs IOs be disabled at all ? If not, then |
* I suppose it's a matter of having the proper arch hook telling |
* us about it, so we basically never allow anybody to succeed a |
* vga_get()... |
*/ |
#ifdef CONFIG_VGA_ARB |
extern struct pci_dev *vga_default_device(void); |
extern void vga_set_default_device(struct pci_dev *pdev); |
#else |
static inline struct pci_dev *vga_default_device(void) { return NULL; } |
static inline void vga_set_default_device(struct pci_dev *pdev) { } |
#endif |
/** |
* vga_conflicts |
* |
* Architectures should define this if they have several |
* independent PCI domains that can afford concurrent VGA |
* decoding |
*/ |
#ifndef __ARCH_HAS_VGA_CONFLICT |
static inline int vga_conflicts(struct pci_dev *p1, struct pci_dev *p2) |
{ |
return 1; |
} |
#endif |
/** |
* vga_client_register |
* |
* @pdev: pci device of the VGA client |
* @cookie: client cookie to be used in callbacks |
* @irq_set_state: irq state change callback |
* @set_vga_decode: vga decode change callback |
* |
* return value: 0 on success, -1 on failure |
* Register a client with the VGA arbitration logic |
* |
* Clients have two callback mechanisms they can use. |
* irq enable/disable callback - |
* If a client can't disable its GPU's VGA resources, then we |
* need to be able to ask it to turn off its irqs when we |
* turn off its mem and io decoding. |
* set_vga_decode |
* If a client can disable its GPU VGA resource, it will |
* get a callback from this to set the encode/decode state |
* |
* Rationale: we cannot disable VGA decode resources unconditionally; |
* some single GPU laptops seem to require ACPI or BIOS access to the |
* VGA registers to control things like backlights etc. |
* Hopefully newer multi-GPU laptops do something saner, and desktops |
* won't have any special ACPI for this. |
* The driver will get a callback when VGA arbitration is first used |
* by userspace, since some older X servers have issues. |
*/ |
#if defined(CONFIG_VGA_ARB) |
int vga_client_register(struct pci_dev *pdev, void *cookie, |
void (*irq_set_state)(void *cookie, bool state), |
unsigned int (*set_vga_decode)(void *cookie, bool state)); |
#else |
static inline int vga_client_register(struct pci_dev *pdev, void *cookie, |
void (*irq_set_state)(void *cookie, bool state), |
unsigned int (*set_vga_decode)(void *cookie, bool state)) |
{ |
return 0; |
} |
#endif |
#endif /* LINUX_VGA_H */ |
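A minimal sketch of the acquire/release discipline the comments above describe: hold the legacy I/O range only around the actual register access (example_poke_vga_regs() is hypothetical): |
static void example_poke_vga_regs(void);	/* hypothetical register access */ |
static int example_touch_legacy_vga(struct pci_dev *pdev) |
{ |
	int ret; |
	ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO); |
	if (ret) |
		return ret;	/* interrupted while waiting for the arbiter */ |
	example_poke_vga_regs(); |
	vga_put(pdev, VGA_RSRC_LEGACY_IO); |
	return 0; |
} |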
/drivers/include/linux/wait.h |
---|
1,8 → 1,15 |
#ifndef _LINUX_WAIT_H |
#define _LINUX_WAIT_H |
/* |
* Linux wait queue related types and methods |
*/ |
#include <linux/list.h> |
#include <linux/stddef.h> |
#include <linux/spinlock.h> |
#include <asm/current.h> |
#include <linux/list.h> |
#include <syscall.h> |
typedef struct __wait_queue wait_queue_t; |
28,6 → 35,10 |
return !list_empty(&q->task_list); |
} |
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); |
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); |
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); |
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) |
{ |
list_add(&new->task_list, &head->task_list); |
145,10 → 156,10 |
}; |
struct completion { |
unsigned int done; |
wait_queue_head_t wait; |
}; |
//struct completion { |
// unsigned int done; |
// wait_queue_head_t wait; |
//}; |
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); |
/drivers/include/linux/workqueue.h |
---|
1,11 → 1,21 |
/* |
* workqueue.h --- work queue handling for Linux. |
*/ |
#ifndef _LINUX_WORKQUEUE_H |
#define _LINUX_WORKQUEUE_H |
#include <linux/list.h> |
#include <linux/linkage.h> |
#include <linux/lockdep.h> |
#include <linux/threads.h> |
#include <syscall.h> |
struct workqueue_struct; |
struct work_struct; |
typedef void (*work_func_t)(struct work_struct *work); |
void __stdcall delayed_work_timer_fn(unsigned long __data); |
/* |
* Workqueue flags and constants. For details, please refer to |
38,6 → 48,9 |
struct list_head entry; |
struct workqueue_struct *data; |
work_func_t func; |
#ifdef CONFIG_LOCKDEP |
struct lockdep_map lockdep_map; |
#endif |
}; |
struct delayed_work { |
/drivers/include/linux/ww_mutex.h |
---|
17,8 → 17,6 |
#include <linux/mutex.h> |
#include <syscall.h> |
#define current (void*)GetPid() |
struct ww_class { |
atomic_long_t stamp; |
struct lock_class_key acquire_key; |