/drivers/include/linux/compiler-gcc4.h |
---|
13,7 → 13,7 |
#define __must_check __attribute__((warn_unused_result)) |
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b) |
#if GCC_VERSION >= 40100 |
#if GCC_VERSION >= 40100 && GCC_VERSION < 40600 |
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0) |
#endif |
/drivers/include/linux/ctype.h |
---|
61,4 → 61,10 |
return c | 0x20; |
} |
/* Fast check for octal digit */ |
static inline int isodigit(const char c) |
{ |
return c >= '0' && c <= '7'; |
} |
#endif |
/drivers/include/linux/err.h |
---|
24,17 → 24,17 |
return (void *) error; |
} |
static inline long __must_check PTR_ERR(const void *ptr) |
static inline long __must_check PTR_ERR(__force const void *ptr) |
{ |
return (long) ptr; |
} |
static inline long __must_check IS_ERR(const void *ptr) |
static inline long __must_check IS_ERR(__force const void *ptr) |
{ |
return IS_ERR_VALUE((unsigned long)ptr); |
} |
static inline long __must_check IS_ERR_OR_NULL(const void *ptr) |
static inline long __must_check IS_ERR_OR_NULL(__force const void *ptr) |
{ |
return !ptr || IS_ERR_VALUE((unsigned long)ptr); |
} |
46,13 → 46,13 |
* Explicitly cast an error-valued pointer to another pointer type in such a |
* way as to make it clear that's what's going on. |
*/ |
static inline void * __must_check ERR_CAST(const void *ptr) |
static inline void * __must_check ERR_CAST(__force const void *ptr) |
{ |
/* cast away the const */ |
return (void *) ptr; |
} |
static inline int __must_check PTR_RET(const void *ptr) |
static inline int __must_check PTR_RET(__force const void *ptr) |
{ |
if (IS_ERR(ptr)) |
return PTR_ERR(ptr); |
/drivers/include/linux/hash.h |
---|
0,0 → 1,81 |
#ifndef _LINUX_HASH_H |
#define _LINUX_HASH_H |
/* Fast hashing routine for ints, longs and pointers. |
(C) 2002 Nadia Yvette Chambers, IBM */ |
/* |
* Knuth recommends primes in approximately golden ratio to the maximum |
* integer representable by a machine word for multiplicative hashing. |
* Chuck Lever verified the effectiveness of this technique: |
* http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf |
* |
* These primes are chosen to be bit-sparse, that is operations on |
* them can use shifts and additions instead of multiplications for |
* machines where multiplications are slow. |
*/ |
#include <asm/types.h> |
#include <linux/compiler.h> |
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ |
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL |
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */ |
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL |
#if BITS_PER_LONG == 32 |
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32 |
#define hash_long(val, bits) hash_32(val, bits) |
#elif BITS_PER_LONG == 64 |
#define hash_long(val, bits) hash_64(val, bits) |
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64 |
#else |
#error Wordsize not 32 or 64 |
#endif |
static __always_inline u64 hash_64(u64 val, unsigned int bits) |
{ |
u64 hash = val; |
/* Sigh, gcc can't optimise this alone like it does for 32 bits. */ |
u64 n = hash; |
n <<= 18; |
hash -= n; |
n <<= 33; |
hash -= n; |
n <<= 3; |
hash += n; |
n <<= 3; |
hash -= n; |
n <<= 4; |
hash += n; |
n <<= 2; |
hash += n; |
/* High bits are more random, so use them. */ |
return hash >> (64 - bits); |
} |
static inline u32 hash_32(u32 val, unsigned int bits) |
{ |
/* On some cpus multiply is faster, on others gcc will do shifts */ |
u32 hash = val * GOLDEN_RATIO_PRIME_32; |
/* High bits are more random, so use them. */ |
return hash >> (32 - bits); |
} |
static inline unsigned long hash_ptr(const void *ptr, unsigned int bits) |
{ |
return hash_long((unsigned long)ptr, bits); |
} |
static inline u32 hash32_ptr(const void *ptr) |
{ |
unsigned long val = (unsigned long)ptr; |
#if BITS_PER_LONG == 64 |
val ^= (val >> 32); |
#endif |
return (u32)val; |
} |
#endif /* _LINUX_HASH_H */ |
/drivers/include/linux/i2c.h |
---|
55,7 → 55,6 |
* struct i2c_driver - represent an I2C device driver |
* @class: What kind of i2c device we instantiate (for detect) |
* @attach_adapter: Callback for bus addition (deprecated) |
* @detach_adapter: Callback for bus removal (deprecated) |
* @probe: Callback for device binding |
* @remove: Callback for device unbinding |
* @shutdown: Callback for device shutdown |
92,12 → 91,10 |
struct i2c_driver { |
unsigned int class; |
/* Notifies the driver that a new bus has appeared or is about to be |
* removed. You should avoid using this, it will be removed in a |
* near future. |
/* Notifies the driver that a new bus has appeared. You should avoid |
* using this, it will be removed in a near future. |
*/ |
int (*attach_adapter)(struct i2c_adapter *) __deprecated; |
int (*detach_adapter)(struct i2c_adapter *) __deprecated; |
/* Standard driver model interfaces */ |
int (*probe)(struct i2c_client *, const struct i2c_device_id *); |
192,9 → 189,6 |
unsigned short addr; |
void *platform_data; |
struct dev_archdata *archdata; |
#ifdef CONFIG_OF |
struct device_node *of_node; |
#endif |
int irq; |
}; |
/drivers/include/linux/idr.h |
---|
48,6 → 48,7 |
struct idr_layer *id_free; |
int layers; /* only valid w/o concurrent changes */ |
int id_free_cnt; |
int cur; /* current pos for cyclic allocation */ |
spinlock_t lock; |
}; |
79,10 → 80,9 |
*/ |
void *idr_find_slowpath(struct idr *idp, int id); |
int idr_pre_get(struct idr *idp, gfp_t gfp_mask); |
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); |
void idr_preload(gfp_t gfp_mask); |
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); |
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask); |
int idr_for_each(struct idr *idp, |
int (*fn)(int id, void *p, void *data), void *data); |
void *idr_get_next(struct idr *idp, int *nextid); |
105,7 → 105,7 |
/** |
* idr_find - return pointer for given id |
* @idp: idr handle |
* @idr: idr handle |
* @id: lookup key |
* |
* Return the pointer given the id it has been registered with. A %NULL |
126,31 → 126,69 |
} |
/** |
* idr_get_new - allocate new idr entry |
* idr_for_each_entry - iterate over an idr's elements of a given type |
* @idp: idr handle |
* @entry: the type * to use as cursor |
* @id: id entry's key |
* |
* @entry and @id do not need to be initialized before the loop, and |
* after normal terminatinon @entry is left with the value NULL. This |
* is convenient for a "not found" value. |
*/ |
#define idr_for_each_entry(idp, entry, id) \ |
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) |
/* |
* Don't use the following functions. These exist only to suppress |
* deprecated warnings on EXPORT_SYMBOL()s. |
*/ |
int __idr_pre_get(struct idr *idp, gfp_t gfp_mask); |
int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); |
void __idr_remove_all(struct idr *idp); |
/** |
* idr_pre_get - reserve resources for idr allocation |
* @idp: idr handle |
* @gfp_mask: memory allocation flags |
* |
* Part of old alloc interface. This is going away. Use |
* idr_preload[_end]() and idr_alloc() instead. |
*/ |
static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask) |
{ |
return __idr_pre_get(idp, gfp_mask); |
} |
/** |
* idr_get_new_above - allocate new idr entry above or equal to a start id |
* @idp: idr handle |
* @ptr: pointer you want associated with the id |
* @starting_id: id to start search at |
* @id: pointer to the allocated handle |
* |
* Simple wrapper around idr_get_new_above() w/ @starting_id of zero. |
* Part of old alloc interface. This is going away. Use |
* idr_preload[_end]() and idr_alloc() instead. |
*/ |
static inline int idr_get_new(struct idr *idp, void *ptr, int *id) |
static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr, |
int starting_id, int *id) |
{ |
return idr_get_new_above(idp, ptr, 0, id); |
return __idr_get_new_above(idp, ptr, starting_id, id); |
} |
/** |
* idr_for_each_entry - iterate over an idr's elements of a given type |
* idr_get_new - allocate new idr entry |
* @idp: idr handle |
* @entry: the type * to use as cursor |
* @id: id entry's key |
* @ptr: pointer you want associated with the id |
* @id: pointer to the allocated handle |
* |
* Part of old alloc interface. This is going away. Use |
* idr_preload[_end]() and idr_alloc() instead. |
*/ |
#define idr_for_each_entry(idp, entry, id) \ |
for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \ |
entry != NULL; \ |
++id, entry = (typeof(entry))idr_get_next((idp), &(id))) |
static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id) |
{ |
return __idr_get_new_above(idp, ptr, 0, id); |
} |
void __idr_remove_all(struct idr *idp); /* don't use */ |
/** |
* idr_remove_all - remove all ids from the given idr tree |
* @idp: idr handle |
193,8 → 231,22 |
void ida_destroy(struct ida *ida); |
void ida_init(struct ida *ida); |
void __init idr_init_cache(void); |
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, |
gfp_t gfp_mask); |
void ida_simple_remove(struct ida *ida, unsigned int id); |
/** |
* ida_get_new - allocate new ID |
* @ida: idr handle |
* @p_id: pointer to the allocated handle |
* |
* Simple wrapper around ida_get_new_above() w/ @starting_id of zero. |
*/ |
static inline int ida_get_new(struct ida *ida, int *p_id) |
{ |
return ida_get_new_above(ida, 0, p_id); |
} |
void __init idr_init_cache(void); |
#endif /* __IDR_H__ */ |
/drivers/include/linux/jiffies.h |
---|
130,6 → 130,10 |
((__s64)(a) - (__s64)(b) >= 0)) |
#define time_before_eq64(a,b) time_after_eq64(b,a) |
#define time_in_range64(a, b, c) \ |
(time_after_eq64(a, b) && \ |
time_before_eq64(a, c)) |
/* |
* These four macros compare jiffies and 'a' for convenience. |
*/ |
/drivers/include/linux/list.h |
---|
361,22 → 361,22 |
list_entry((ptr)->next, type, member) |
/** |
* list_for_each - iterate over a list |
* @pos: the &struct list_head to use as a loop cursor. |
* @head: the head for your list. |
* list_first_entry_or_null - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* |
* Note that if the list is empty, it returns NULL. |
*/ |
#define list_for_each(pos, head) \ |
for (pos = (head)->next; pos != (head); pos = pos->next) |
#define list_first_entry_or_null(ptr, type, member) \ |
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) |
/** |
* __list_for_each - iterate over a list |
* list_for_each - iterate over a list |
* @pos: the &struct list_head to use as a loop cursor. |
* @head: the head for your list. |
* |
* This variant doesn't differ from list_for_each() any more. |
* We don't do prefetching in either case. |
*/ |
#define __list_for_each(pos, head) \ |
#define list_for_each(pos, head) \ |
for (pos = (head)->next; pos != (head); pos = pos->next) |
/** |
665,54 → 665,51 |
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ |
pos = n) |
#define hlist_entry_safe(ptr, type, member) \ |
({ typeof(ptr) ____ptr = (ptr); \ |
____ptr ? hlist_entry(____ptr, type, member) : NULL; \ |
}) |
/** |
* hlist_for_each_entry - iterate over list of given type |
* @tpos: the type * to use as a loop cursor. |
* @pos: the &struct hlist_node to use as a loop cursor. |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry(tpos, pos, head, member) \ |
for (pos = (head)->first; \ |
pos && \ |
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
pos = pos->next) |
#define hlist_for_each_entry(pos, head, member) \ |
for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\ |
pos; \ |
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) |
/** |
* hlist_for_each_entry_continue - iterate over a hlist continuing after current point |
* @tpos: the type * to use as a loop cursor. |
* @pos: the &struct hlist_node to use as a loop cursor. |
* @pos: the type * to use as a loop cursor. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry_continue(tpos, pos, member) \ |
for (pos = (pos)->next; \ |
pos && \ |
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
pos = pos->next) |
#define hlist_for_each_entry_continue(pos, member) \ |
for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ |
pos; \ |
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) |
/** |
* hlist_for_each_entry_from - iterate over a hlist continuing from current point |
* @tpos: the type * to use as a loop cursor. |
* @pos: the &struct hlist_node to use as a loop cursor. |
* @pos: the type * to use as a loop cursor. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry_from(tpos, pos, member) \ |
for (; pos && \ |
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
pos = pos->next) |
#define hlist_for_each_entry_from(pos, member) \ |
for (; pos; \ |
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) |
/** |
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry |
* @tpos: the type * to use as a loop cursor. |
* @pos: the &struct hlist_node to use as a loop cursor. |
* @pos: the type * to use as a loop cursor. |
* @n: another &struct hlist_node to use as temporary storage |
* @head: the head for your list. |
* @member: the name of the hlist_node within the struct. |
*/ |
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ |
for (pos = (head)->first; \ |
pos && ({ n = pos->next; 1; }) && \ |
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ |
pos = n) |
#define hlist_for_each_entry_safe(pos, n, head, member) \ |
for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\ |
pos && ({ n = pos->member.next; 1; }); \ |
pos = hlist_entry_safe(n, typeof(*pos), member)) |
#endif |
/drivers/include/linux/math64.h |
---|
7,6 → 7,7 |
#if BITS_PER_LONG == 64 |
#define div64_long(x,y) div64_s64((x),(y)) |
#define div64_ul(x, y) div64_u64((x), (y)) |
/** |
* div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder |
48,6 → 49,7 |
#elif BITS_PER_LONG == 32 |
#define div64_long(x,y) div_s64((x),(y)) |
#define div64_ul(x, y) div_u64((x), (y)) |
#ifndef div_u64_rem |
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) |
/drivers/include/linux/mod_devicetable.h |
---|
33,8 → 33,7 |
__u32 model_id; |
__u32 specifier_id; |
__u32 version; |
kernel_ulong_t driver_data |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
kernel_ulong_t driver_data; |
}; |
147,8 → 146,7 |
__u16 group; |
__u32 vendor; |
__u32 product; |
kernel_ulong_t driver_data |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
kernel_ulong_t driver_data; |
}; |
/* s390 CCW devices */ |
172,8 → 170,6 |
struct ap_device_id { |
__u16 match_flags; /* which fields to match against */ |
__u8 dev_type; /* device type */ |
__u8 pad1; |
__u32 pad2; |
kernel_ulong_t driver_info; |
}; |
183,13 → 179,10 |
struct css_device_id { |
__u8 match_flags; |
__u8 type; /* subchannel type */ |
__u16 pad2; |
__u32 pad3; |
kernel_ulong_t driver_data; |
}; |
#define ACPI_ID_LEN 16 /* only 9 bytes needed here, 16 bytes are used */ |
/* to workaround crosscompile issues */ |
#define ACPI_ID_LEN 9 |
struct acpi_device_id { |
__u8 id[ACPI_ID_LEN]; |
230,11 → 223,7 |
char name[32]; |
char type[32]; |
char compatible[128]; |
#ifdef __KERNEL__ |
const void *data; |
#else |
kernel_ulong_t data; |
#endif |
}; |
/* VIO */ |
259,24 → 248,14 |
/* for pseudo multi-function devices */ |
__u8 device_no; |
__u32 prod_id_hash[4] |
__attribute__((aligned(sizeof(__u32)))); |
__u32 prod_id_hash[4]; |
/* not matched against in kernelspace*/ |
#ifdef __KERNEL__ |
const char * prod_id[4]; |
#else |
kernel_ulong_t prod_id[4] |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
#endif |
/* not matched against */ |
kernel_ulong_t driver_info; |
#ifdef __KERNEL__ |
char * cisfile; |
#else |
kernel_ulong_t cisfile; |
#endif |
}; |
#define PCMCIA_DEV_ID_MATCH_MANF_ID 0x0001 |
372,8 → 351,7 |
__u8 class; /* Standard interface or SDIO_ANY_ID */ |
__u16 vendor; /* Vendor or SDIO_ANY_ID */ |
__u16 device; /* Device ID or SDIO_ANY_ID */ |
kernel_ulong_t driver_data /* Data private to the driver */ |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
kernel_ulong_t driver_data; /* Data private to the driver */ |
}; |
/* SSB core, see drivers/ssb/ */ |
381,7 → 359,8 |
__u16 vendor; |
__u16 coreid; |
__u8 revision; |
}; |
__u8 __pad; |
} __attribute__((packed, aligned(2))); |
#define SSB_DEVICE(_vendor, _coreid, _revision) \ |
{ .vendor = _vendor, .coreid = _coreid, .revision = _revision, } |
#define SSB_DEVTABLE_END \ |
397,7 → 376,7 |
__u16 id; |
__u8 rev; |
__u8 class; |
}; |
} __attribute__((packed,aligned(2))); |
#define BCMA_CORE(_manuf, _id, _rev, _class) \ |
{ .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, } |
#define BCMA_CORETABLE_END \ |
414,6 → 393,23 |
}; |
#define VIRTIO_DEV_ANY_ID 0xffffffff |
/* |
* For Hyper-V devices we use the device guid as the id. |
*/ |
struct hv_vmbus_device_id { |
__u8 guid[16]; |
kernel_ulong_t driver_data; /* Data private to the driver */ |
}; |
/* rpmsg */ |
#define RPMSG_NAME_SIZE 32 |
#define RPMSG_DEVICE_MODALIAS_FMT "rpmsg:%s" |
struct rpmsg_device_id { |
char name[RPMSG_NAME_SIZE]; |
}; |
/* i2c */ |
#define I2C_NAME_SIZE 20 |
421,8 → 417,7 |
struct i2c_device_id { |
char name[I2C_NAME_SIZE]; |
kernel_ulong_t driver_data /* Data private to the driver */ |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
kernel_ulong_t driver_data; /* Data private to the driver */ |
}; |
/* spi */ |
432,8 → 427,7 |
struct spi_device_id { |
char name[SPI_NAME_SIZE]; |
kernel_ulong_t driver_data /* Data private to the driver */ |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
kernel_ulong_t driver_data; /* Data private to the driver */ |
}; |
/* dmi */ |
461,20 → 455,12 |
}; |
struct dmi_strmatch { |
unsigned char slot; |
unsigned char slot:7; |
unsigned char exact_match:1; |
char substr[79]; |
}; |
#ifndef __KERNEL__ |
struct dmi_system_id { |
kernel_ulong_t callback; |
kernel_ulong_t ident; |
struct dmi_strmatch matches[4]; |
kernel_ulong_t driver_data |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
}; |
#else |
struct dmi_system_id { |
int (*callback)(const struct dmi_system_id *); |
const char *ident; |
struct dmi_strmatch matches[4]; |
487,9 → 473,9 |
* error: storage size of '__mod_dmi_device_table' isn't known |
*/ |
#define dmi_device_id dmi_system_id |
#endif |
#define DMI_MATCH(a, b) { a, b } |
#define DMI_MATCH(a, b) { .slot = a, .substr = b } |
#define DMI_EXACT_MATCH(a, b) { .slot = a, .substr = b, .exact_match = 1 } |
#define PLATFORM_NAME_SIZE 20 |
#define PLATFORM_MODULE_PREFIX "platform:" |
496,8 → 482,7 |
struct platform_device_id { |
char name[PLATFORM_NAME_SIZE]; |
kernel_ulong_t driver_data |
__attribute__((aligned(sizeof(kernel_ulong_t)))); |
kernel_ulong_t driver_data; |
}; |
#define MDIO_MODULE_PREFIX "mdio:" |
542,4 → 527,74 |
kernel_ulong_t driver_data; /* data private to the driver */ |
}; |
/** |
* struct amba_id - identifies a device on an AMBA bus |
* @id: The significant bits if the hardware device ID |
* @mask: Bitmask specifying which bits of the id field are significant when |
* matching. A driver binds to a device when ((hardware device ID) & mask) |
* == id. |
* @data: Private data used by the driver. |
*/ |
struct amba_id { |
unsigned int id; |
unsigned int mask; |
void *data; |
}; |
/* |
* Match x86 CPUs for CPU specific drivers. |
* See documentation of "x86_match_cpu" for details. |
*/ |
struct x86_cpu_id { |
__u16 vendor; |
__u16 family; |
__u16 model; |
__u16 feature; /* bit index */ |
kernel_ulong_t driver_data; |
}; |
#define X86_FEATURE_MATCH(x) \ |
{ X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x } |
#define X86_VENDOR_ANY 0xffff |
#define X86_FAMILY_ANY 0 |
#define X86_MODEL_ANY 0 |
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ |
#define IPACK_ANY_FORMAT 0xff |
#define IPACK_ANY_ID (~0) |
struct ipack_device_id { |
__u8 format; /* Format version or IPACK_ANY_ID */ |
__u32 vendor; /* Vendor ID or IPACK_ANY_ID */ |
__u32 device; /* Device ID or IPACK_ANY_ID */ |
}; |
#define MEI_CL_MODULE_PREFIX "mei:" |
#define MEI_CL_NAME_SIZE 32 |
struct mei_cl_device_id { |
char name[MEI_CL_NAME_SIZE]; |
kernel_ulong_t driver_info; |
}; |
/* RapidIO */ |
#define RIO_ANY_ID 0xffff |
/** |
* struct rio_device_id - RIO device identifier |
* @did: RapidIO device ID |
* @vid: RapidIO vendor ID |
* @asm_did: RapidIO assembly device ID |
* @asm_vid: RapidIO assembly vendor ID |
* |
* Identifies a RapidIO device based on both the device/vendor IDs and |
* the assembly device/vendor IDs. |
*/ |
struct rio_device_id { |
__u16 did, vid; |
__u16 asm_did, asm_vid; |
}; |
#endif /* LINUX_MOD_DEVICETABLE_H */ |
/drivers/include/linux/rculist.h |
---|
0,0 → 1,526 |
#ifndef _LINUX_RCULIST_H |
#define _LINUX_RCULIST_H |
#ifdef __KERNEL__ |
/* |
* RCU-protected list version |
*/ |
#include <linux/list.h> |
//#include <linux/rcupdate.h> |
/* |
* Why is there no list_empty_rcu()? Because list_empty() serves this |
* purpose. The list_empty() function fetches the RCU-protected pointer |
* and compares it to the address of the list head, but neither dereferences |
* this pointer itself nor provides this pointer to the caller. Therefore, |
* it is not necessary to use rcu_dereference(), so that list_empty() can |
* be used anywhere you would want to use a list_empty_rcu(). |
*/ |
/* |
* return the ->next pointer of a list_head in an rcu safe |
* way, we must not access it directly |
*/ |
#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) |
/* |
* Insert a new entry between two known consecutive entries. |
* |
* This is only for internal list manipulation where we know |
* the prev/next entries already! |
*/ |
#ifndef CONFIG_DEBUG_LIST |
static inline void __list_add_rcu(struct list_head *new, |
struct list_head *prev, struct list_head *next) |
{ |
new->next = next; |
new->prev = prev; |
rcu_assign_pointer(list_next_rcu(prev), new); |
next->prev = new; |
} |
#else |
extern void __list_add_rcu(struct list_head *new, |
struct list_head *prev, struct list_head *next); |
#endif |
/** |
* list_add_rcu - add a new entry to rcu-protected list |
* @new: new entry to be added |
* @head: list head to add it after |
* |
* Insert a new entry after the specified head. |
* This is good for implementing stacks. |
* |
* The caller must take whatever precautions are necessary |
* (such as holding appropriate locks) to avoid racing |
* with another list-mutation primitive, such as list_add_rcu() |
* or list_del_rcu(), running on this same list. |
* However, it is perfectly legal to run concurrently with |
* the _rcu list-traversal primitives, such as |
* list_for_each_entry_rcu(). |
*/ |
static inline void list_add_rcu(struct list_head *new, struct list_head *head) |
{ |
__list_add_rcu(new, head, head->next); |
} |
/** |
* list_add_tail_rcu - add a new entry to rcu-protected list |
* @new: new entry to be added |
* @head: list head to add it before |
* |
* Insert a new entry before the specified head. |
* This is useful for implementing queues. |
* |
* The caller must take whatever precautions are necessary |
* (such as holding appropriate locks) to avoid racing |
* with another list-mutation primitive, such as list_add_tail_rcu() |
* or list_del_rcu(), running on this same list. |
* However, it is perfectly legal to run concurrently with |
* the _rcu list-traversal primitives, such as |
* list_for_each_entry_rcu(). |
*/ |
static inline void list_add_tail_rcu(struct list_head *new, |
struct list_head *head) |
{ |
__list_add_rcu(new, head->prev, head); |
} |
/** |
* list_del_rcu - deletes entry from list without re-initialization |
* @entry: the element to delete from the list. |
* |
* Note: list_empty() on entry does not return true after this, |
* the entry is in an undefined state. It is useful for RCU based |
* lockfree traversal. |
* |
* In particular, it means that we can not poison the forward |
* pointers that may still be used for walking the list. |
* |
* The caller must take whatever precautions are necessary |
* (such as holding appropriate locks) to avoid racing |
* with another list-mutation primitive, such as list_del_rcu() |
* or list_add_rcu(), running on this same list. |
* However, it is perfectly legal to run concurrently with |
* the _rcu list-traversal primitives, such as |
* list_for_each_entry_rcu(). |
* |
* Note that the caller is not permitted to immediately free |
* the newly deleted entry. Instead, either synchronize_rcu() |
* or call_rcu() must be used to defer freeing until an RCU |
* grace period has elapsed. |
*/ |
static inline void list_del_rcu(struct list_head *entry) |
{ |
__list_del_entry(entry); |
entry->prev = LIST_POISON2; |
} |
/** |
* hlist_del_init_rcu - deletes entry from hash list with re-initialization |
* @n: the element to delete from the hash list. |
* |
* Note: list_unhashed() on the node return true after this. It is |
* useful for RCU based read lockfree traversal if the writer side |
* must know if the list entry is still hashed or already unhashed. |
* |
* In particular, it means that we can not poison the forward pointers |
* that may still be used for walking the hash list and we can only |
* zero the pprev pointer so list_unhashed() will return true after |
* this. |
* |
* The caller must take whatever precautions are necessary (such as |
* holding appropriate locks) to avoid racing with another |
* list-mutation primitive, such as hlist_add_head_rcu() or |
* hlist_del_rcu(), running on this same list. However, it is |
* perfectly legal to run concurrently with the _rcu list-traversal |
* primitives, such as hlist_for_each_entry_rcu(). |
*/ |
static inline void hlist_del_init_rcu(struct hlist_node *n) |
{ |
if (!hlist_unhashed(n)) { |
__hlist_del(n); |
n->pprev = NULL; |
} |
} |
/** |
* list_replace_rcu - replace old entry by new one |
* @old : the element to be replaced |
* @new : the new element to insert |
* |
* The @old entry will be replaced with the @new entry atomically. |
* Note: @old should not be empty. |
*/ |
static inline void list_replace_rcu(struct list_head *old, |
struct list_head *new) |
{ |
new->next = old->next; |
new->prev = old->prev; |
rcu_assign_pointer(list_next_rcu(new->prev), new); |
new->next->prev = new; |
old->prev = LIST_POISON2; |
} |
/** |
* list_splice_init_rcu - splice an RCU-protected list into an existing list. |
* @list: the RCU-protected list to splice |
* @head: the place in the list to splice the first list into |
* @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... |
* |
* @head can be RCU-read traversed concurrently with this function. |
* |
* Note that this function blocks. |
* |
* Important note: the caller must take whatever action is necessary to |
* prevent any other updates to @head. In principle, it is possible |
* to modify the list as soon as sync() begins execution. |
* If this sort of thing becomes necessary, an alternative version |
* based on call_rcu() could be created. But only if -really- |
* needed -- there is no shortage of RCU API members. |
*/ |
static inline void list_splice_init_rcu(struct list_head *list, |
struct list_head *head, |
void (*sync)(void)) |
{ |
struct list_head *first = list->next; |
struct list_head *last = list->prev; |
struct list_head *at = head->next; |
if (list_empty(list)) |
return; |
/* "first" and "last" tracking list, so initialize it. */ |
INIT_LIST_HEAD(list); |
/* |
* At this point, the list body still points to the source list. |
* Wait for any readers to finish using the list before splicing |
* the list body into the new list. Any new readers will see |
* an empty list. |
*/ |
sync(); |
/* |
* Readers are finished with the source list, so perform splice. |
* The order is important if the new list is global and accessible |
* to concurrent RCU readers. Note that RCU readers are not |
* permitted to traverse the prev pointers without excluding |
* this function. |
*/ |
last->next = at; |
rcu_assign_pointer(list_next_rcu(head), first); |
first->prev = head; |
at->prev = last; |
} |
/** |
* list_entry_rcu - get the struct for this entry |
* @ptr: the &struct list_head pointer. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* |
* This primitive may safely run concurrently with the _rcu list-mutation |
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
*/ |
#define list_entry_rcu(ptr, type, member) \ |
({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \ |
container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \ |
}) |
/** |
* Where are list_empty_rcu() and list_first_entry_rcu()? |
* |
* Implementing those functions following their counterparts list_empty() and |
* list_first_entry() is not advisable because they lead to subtle race |
* conditions as the following snippet shows: |
* |
* if (!list_empty_rcu(mylist)) { |
* struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member); |
* do_something(bar); |
* } |
* |
* The list may not be empty when list_empty_rcu checks it, but it may be when |
* list_first_entry_rcu rereads the ->next pointer. |
* |
* Rereading the ->next pointer is not a problem for list_empty() and |
* list_first_entry() because they would be protected by a lock that blocks |
* writers. |
* |
* See list_first_or_null_rcu for an alternative. |
*/ |
/** |
* list_first_or_null_rcu - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* |
* Note that if the list is empty, it returns NULL. |
* |
* This primitive may safely run concurrently with the _rcu list-mutation |
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
*/ |
#define list_first_or_null_rcu(ptr, type, member) \ |
({struct list_head *__ptr = (ptr); \ |
struct list_head __rcu *__next = list_next_rcu(__ptr); \ |
likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \ |
}) |
/** |
* list_for_each_entry_rcu - iterate over rcu list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* |
* This list-traversal primitive may safely run concurrently with |
* the _rcu list-mutation primitives such as list_add_rcu() |
* as long as the traversal is guarded by rcu_read_lock(). |
*/ |
#define list_for_each_entry_rcu(pos, head, member) \ |
for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ |
&pos->member != (head); \ |
pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
/** |
* list_for_each_entry_continue_rcu - continue iteration over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* |
* Continue to iterate over list of given type, continuing after |
* the current position. |
*/ |
#define list_for_each_entry_continue_rcu(pos, head, member) \ |
for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \ |
&pos->member != (head); \ |
pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_unhashed() on the entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list:
 * concurrent readers already holding @n must still be able to
 * follow n->next to the rest of the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	/* Only the backward link is poisoned; ->next stays walkable. */
	n->pprev = LIST_POISON2;
}
/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically as
 * seen by concurrent readers: @new is fully initialized before it is
 * published through rcu_assign_pointer().
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;
	/* Prepare @new completely before making it reachable. */
	new->next = next;
	new->pprev = old->pprev;
	/* Publish @new: the predecessor's forward link now points at it. */
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		new->next->pprev = &new->next;
	/* Readers holding @old can still follow ->next; poison only ->pprev. */
	old->pprev = LIST_POISON2;
}
/*
 * Return the first or the next element in an RCU protected hlist.
 * The casts add the __rcu address-space annotation so the results can
 * be passed to rcu_dereference()/rcu_assign_pointer().
 */
#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))
/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;
	/* Fully link @n before publishing it through h->first. */
	n->next = first;
	n->pprev = &h->first;
	rcu_assign_pointer(hlist_first_rcu(h), n);
	if (first)
		first->pprev = &n->next;
}
/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	/* Link @n fully before publishing via the predecessor's pointer. */
	n->pprev = next->pprev;
	n->next = next;
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	next->pprev = &n->next;
}
/**
 * hlist_add_after_rcu
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
					struct hlist_node *n)
{
	/* Link @n fully before publishing it through prev->next. */
	n->next = prev->next;
	n->pprev = &prev->next;
	rcu_assign_pointer(hlist_next_rcu(prev), n);
	if (n->next)
		n->next->pprev = &n->next;
}
/*
 * Iterate over the raw nodes of an RCU protected hlist; every forward
 * link is read with rcu_dereference().
 */
#define __hlist_for_each_rcu(pos, head)				\
	for (pos = rcu_dereference(hlist_first_rcu(head));	\
		pos;						\
		pos = rcu_dereference(hlist_next_rcu(pos)))
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:    the type * to use as a loop cursor.
 * @head:   the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(pos, head, member) \
	for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member); \
		pos; \
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos:    the type * to use as a loop cursor.
 * @head:   the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing (it uses the _notrace variant
 * of rcu_dereference_raw()).
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member) \
	for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
			typeof(*(pos)), member); \
		pos; \
		pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos:    the type * to use as a loop cursor.
 * @head:   the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock(); links are
 * read with the _bh flavour of rcu_dereference().
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member) \
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
			typeof(*(pos)), member); \
		pos; \
		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos:    the type * to use as a loop cursor (must already be valid).
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu(pos, member) \
	for (pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
			typeof(*(pos)), member); \
		pos; \
		pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
			typeof(*(pos)), member))
/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos:    the type * to use as a loop cursor (must already be valid).
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
	for (pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
			typeof(*(pos)), member); \
		pos; \
		pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
			typeof(*(pos)), member))
#endif /* __KERNEL__ */ |
#endif |
/drivers/include/linux/slab.h |
---|
1,3 → 1,14 |
/* |
* Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk). |
* |
* (C) SGI 2006, Christoph Lameter |
* Cleaned up and restructured to ease the addition of alternative |
* implementations of SLAB allocators. |
*/ |
#ifndef _LINUX_SLAB_H |
#define _LINUX_SLAB_H |
#include <errno.h> |
// stub |
#endif /* _LINUX_SLAB_H */ |
/drivers/include/linux/spinlock_up.h |
---|
14,7 → 14,10 |
* In the debug case, 1 means unlocked, 0 means locked. (the values |
* are inverted, to catch initialization bugs) |
* |
* No atomicity anywhere, we are on UP. |
* No atomicity anywhere, we are on UP. However, we still need |
* the compiler barriers, because we do not want the compiler to |
* move potentially faulting instructions (notably user accesses) |
* into the locked sequence, resulting in non-atomic execution. |
*/ |
#ifdef CONFIG_DEBUG_SPINLOCK |
/drivers/include/linux/string.h |
---|
142,4 → 142,15 |
extern size_t memweight(const void *ptr, size_t bytes); |
/**
 * kbasename - return the last part of a pathname.
 *
 * @path: path to extract the filename from.
 *
 * Returns a pointer into @path just past the final '/', or @path
 * itself when it contains no '/' at all.
 */
static inline const char *kbasename(const char *path)
{
	const char *slash = strrchr(path, '/');

	if (slash)
		return slash + 1;
	return path;
}
#endif /* _LINUX_STRING_H_ */ |
/drivers/include/linux/time.h |
---|
0,0 → 1,270 |
#ifndef _LINUX_TIME_H |
#define _LINUX_TIME_H |
//# include <linux/cache.h> |
//# include <linux/seqlock.h> |
# include <linux/math64.h> |
//#include <uapi/linux/time.h> |
extern struct timezone sys_tz; |
/* Parameters used to convert the timespec values: */ |
#define MSEC_PER_SEC 1000L |
#define USEC_PER_MSEC 1000L |
#define NSEC_PER_USEC 1000L |
#define NSEC_PER_MSEC 1000000L |
#define USEC_PER_SEC 1000000L |
#define NSEC_PER_SEC 1000000000L |
#define FSEC_PER_SEC 1000000000000000LL |
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) |
/*
 * timespec_equal - return 1 when both the seconds and the nanoseconds
 * fields of @a and @b match, 0 otherwise.
 */
static inline int timespec_equal(const struct timespec *a,
				const struct timespec *b)
{
	if (a->tv_sec != b->tv_sec)
		return 0;
	return a->tv_nsec == b->tv_nsec;
}
/*
 * lhs < rhs:  return <0
 * lhs == rhs: return 0
 * lhs > rhs:  return >0
 *
 * When the seconds fields are equal, the signed difference of the
 * nanosecond fields is returned directly.
 */
static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
{
	if (lhs->tv_sec != rhs->tv_sec)
		return lhs->tv_sec < rhs->tv_sec ? -1 : 1;
	return lhs->tv_nsec - rhs->tv_nsec;
}
/*
 * Three-way comparison of two timevals: <0, 0 or >0 for
 * lhs < rhs, lhs == rhs and lhs > rhs respectively. With equal
 * seconds the signed microsecond difference is returned directly.
 */
static inline int timeval_compare(const struct timeval *lhs, const struct timeval *rhs)
{
	if (lhs->tv_sec != rhs->tv_sec)
		return lhs->tv_sec < rhs->tv_sec ? -1 : 1;
	return lhs->tv_usec - rhs->tv_usec;
}
extern unsigned long mktime(const unsigned int year, const unsigned int mon, |
const unsigned int day, const unsigned int hour, |
const unsigned int min, const unsigned int sec); |
extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); |
/* |
* timespec_add_safe assumes both values are positive and checks |
 * for overflow. It will return TIME_T_MAX if the return would be |
 * smaller than either of the arguments. |
*/ |
extern struct timespec timespec_add_safe(const struct timespec lhs, |
const struct timespec rhs); |
/*
 * timespec_add - return lhs + rhs in normalized form (delegates the
 * carry handling to set_normalized_timespec()).
 */
static inline struct timespec timespec_add(struct timespec lhs,
						struct timespec rhs)
{
	struct timespec sum;

	set_normalized_timespec(&sum, lhs.tv_sec + rhs.tv_sec,
				lhs.tv_nsec + rhs.tv_nsec);
	return sum;
}
/*
 * sub = lhs - rhs, in normalized form (borrow handling is done by
 * set_normalized_timespec()).
 */
static inline struct timespec timespec_sub(struct timespec lhs,
						struct timespec rhs)
{
	struct timespec delta;

	set_normalized_timespec(&delta, lhs.tv_sec - rhs.tv_sec,
				lhs.tv_nsec - rhs.tv_nsec);
	return delta;
}
#define KTIME_MAX ((s64)~((u64)1 << 63)) |
#if (BITS_PER_LONG == 64) |
# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) |
#else |
# define KTIME_SEC_MAX LONG_MAX |
#endif |
/* |
* Returns true if the timespec is norm, false if denorm: |
*/ |
static inline bool timespec_valid(const struct timespec *ts) |
{ |
/* Dates before 1970 are bogus */ |
if (ts->tv_sec < 0) |
return false; |
/* Can't have more nanoseconds then a second */ |
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) |
return false; |
return true; |
} |
static inline bool timespec_valid_strict(const struct timespec *ts) |
{ |
if (!timespec_valid(ts)) |
return false; |
/* Disallow values that could overflow ktime_t */ |
if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) |
return false; |
return true; |
} |
extern bool persistent_clock_exist; |
static inline bool has_persistent_clock(void) |
{ |
return persistent_clock_exist; |
} |
extern void read_persistent_clock(struct timespec *ts); |
extern void read_boot_clock(struct timespec *ts); |
extern int persistent_clock_is_local; |
extern int update_persistent_clock(struct timespec now); |
void timekeeping_init(void); |
extern int timekeeping_suspended; |
unsigned long get_seconds(void); |
struct timespec current_kernel_time(void); |
struct timespec __current_kernel_time(void); /* does not take xtime_lock */ |
struct timespec get_monotonic_coarse(void); |
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, |
struct timespec *wtom, struct timespec *sleep); |
void timekeeping_inject_sleeptime(struct timespec *delta); |
#define CURRENT_TIME (current_kernel_time()) |
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) |
/* Some architectures do not supply their own clocksource. |
* This is mainly the case in architectures that get their |
* inter-tick times by reading the counter on their interval |
* timer. Since these timers wrap every tick, they're not really |
* useful as clocksources. Wrapping them to act like one is possible |
* but not very efficient. So we provide a callout these arches |
* can implement for use with the jiffies clocksource to provide |
 * finer than tick granular time. |
*/ |
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET |
extern u32 (*arch_gettimeoffset)(void); |
#endif |
extern void do_gettimeofday(struct timeval *tv); |
extern int do_settimeofday(const struct timespec *tv); |
extern int do_sys_settimeofday(const struct timespec *tv, |
const struct timezone *tz); |
#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) |
extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); |
struct itimerval; |
extern int do_setitimer(int which, struct itimerval *value, |
struct itimerval *ovalue); |
extern unsigned int alarm_setitimer(unsigned int seconds); |
extern int do_getitimer(int which, struct itimerval *value); |
extern int __getnstimeofday(struct timespec *tv); |
extern void getnstimeofday(struct timespec *tv); |
extern void getrawmonotonic(struct timespec *ts); |
extern void getnstime_raw_and_real(struct timespec *ts_raw, |
struct timespec *ts_real); |
extern void getboottime(struct timespec *ts); |
extern void monotonic_to_bootbased(struct timespec *ts); |
extern void get_monotonic_boottime(struct timespec *ts); |
extern struct timespec timespec_trunc(struct timespec t, unsigned gran); |
extern int timekeeping_valid_for_hres(void); |
extern u64 timekeeping_max_deferment(void); |
extern int timekeeping_inject_offset(struct timespec *ts); |
extern s32 timekeeping_get_tai_offset(void); |
extern void timekeeping_set_tai_offset(s32 tai_offset); |
extern void timekeeping_clocktai(struct timespec *ts); |
struct tms; |
extern void do_sys_times(struct tms *); |
/*
 * Similar to the struct tm in userspace <time.h>, but it needs to be here so
 * that the kernel source is self contained.
 */
struct tm {
	/*
	 * the number of seconds after the minute, normally in the range
	 * 0 to 59, but can be up to 60 to allow for leap seconds
	 */
	int tm_sec;
	/* the number of minutes after the hour, in the range 0 to 59 */
	int tm_min;
	/* the number of hours past midnight, in the range 0 to 23 */
	int tm_hour;
	/* the day of the month, in the range 1 to 31 */
	int tm_mday;
	/* the number of months since January, in the range 0 to 11 */
	int tm_mon;
	/*
	 * the number of years since 1900; note this is long, unlike the
	 * int used by userspace struct tm — presumably to hold very
	 * large year values, TODO confirm against time_to_tm()
	 */
	long tm_year;
	/* the number of days since Sunday, in the range 0 to 6 */
	int tm_wday;
	/* the number of days since January 1, in the range 0 to 365 */
	int tm_yday;
};
void time_to_tm(time_t totalsecs, int offset, struct tm *result); |
/** |
* timespec_to_ns - Convert timespec to nanoseconds |
* @ts: pointer to the timespec variable to be converted |
* |
* Returns the scalar nanosecond representation of the timespec |
* parameter. |
*/ |
static inline s64 timespec_to_ns(const struct timespec *ts) |
{ |
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; |
} |
/** |
* timeval_to_ns - Convert timeval to nanoseconds |
* @ts: pointer to the timeval variable to be converted |
* |
* Returns the scalar nanosecond representation of the timeval |
* parameter. |
*/ |
static inline s64 timeval_to_ns(const struct timeval *tv) |
{ |
return ((s64) tv->tv_sec * NSEC_PER_SEC) + |
tv->tv_usec * NSEC_PER_USEC; |
} |
/** |
* ns_to_timespec - Convert nanoseconds to timespec |
* @nsec: the nanoseconds value to be converted |
* |
* Returns the timespec representation of the nsec parameter. |
*/ |
extern struct timespec ns_to_timespec(const s64 nsec); |
/** |
* ns_to_timeval - Convert nanoseconds to timeval |
* @nsec: the nanoseconds value to be converted |
* |
* Returns the timeval representation of the nsec parameter. |
*/ |
extern struct timeval ns_to_timeval(const s64 nsec); |
/**
 * timespec_add_ns - Adds nanoseconds to a timespec
 * @a: pointer to timespec to be incremented
 * @ns: unsigned nanoseconds value to be added
 *
 * This must always be inlined because its used from the x86-64 vdso,
 * which cannot call other kernel functions.
 */
static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
{
	/*
	 * NOTE(review): __iter_div_u64_rem() (from linux/math64.h,
	 * included above) is expected to return the whole-second
	 * quotient of (tv_nsec + ns) and write the remainder back
	 * through &ns, so @ns afterwards holds the normalized
	 * sub-second part — confirm against the math64.h definition.
	 */
	a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
	a->tv_nsec = ns;
}
#endif |