/drivers/include/linux/acpi.h |
---|
37,6 → 37,8 |
#include <linux/list.h> |
#include <linux/mod_devicetable.h> |
#include <linux/dynamic_debug.h> |
#include <linux/module.h> |
#include <linux/mutex.h> |
#include <acpi/acpi_bus.h> |
#include <acpi/acpi_drivers.h> |
318,6 → 320,7 |
bool acpi_dev_resource_ext_address_space(struct acpi_resource *ares, |
struct resource_win *win); |
unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); |
unsigned int acpi_dev_get_irq_type(int triggering, int polarity); |
bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, |
struct resource *res); |
920,7 → 923,7 |
return NULL; |
} |
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \ |
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, validate, data, fn) \ |
static const void * __acpi_table_##name[] \ |
__attribute__((unused)) \ |
= { (void *) table_id, \ |
/drivers/include/linux/backlight.h |
---|
11,6 → 11,8 |
#include <linux/device.h> |
#include <linux/fb.h> |
#include <linux/mutex.h> |
#include <linux/notifier.h> |
/* Notes on locking: |
* |
* backlight_device->ops_lock is an internal backlight lock protecting the |
43,4 → 45,6 |
BACKLIGHT_UNREGISTERED, |
}; |
struct backlight_device; |
struct fb_info; |
#endif |
/drivers/include/linux/bug.h |
---|
1,57 → 1,27 |
#ifndef _ASM_GENERIC_BUG_H |
#define _ASM_GENERIC_BUG_H |
#ifndef _LINUX_BUG_H |
#define _LINUX_BUG_H |
#include <asm/bug.h> |
#include <linux/compiler.h> |
int printf(const char *fmt, ...); |
enum bug_trap_type { |
BUG_TRAP_TYPE_NONE = 0, |
BUG_TRAP_TYPE_WARN = 1, |
BUG_TRAP_TYPE_BUG = 2, |
}; |
#define __WARN() printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__) |
//#define __WARN_printf(arg...) printf("\nWARNING: at %s:%d\n", __FILE__, __LINE__) |
#define __WARN_printf(arg...) do { printf(arg); __WARN(); } while (0) |
struct pt_regs; |
#define WARN(condition, format...) ({ \ |
int __ret_warn_on = !!(condition); \ |
if (unlikely(__ret_warn_on)) \ |
__WARN_printf(format); \ |
unlikely(__ret_warn_on); \ |
}) |
#ifdef __CHECKER__ |
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) |
#define BUILD_BUG_ON_ZERO(e) (0) |
#define BUILD_BUG_ON_NULL(e) ((void*)0) |
#define BUILD_BUG_ON_INVALID(e) (0) |
#define BUILD_BUG_ON_MSG(cond, msg) (0) |
#define BUILD_BUG_ON(condition) (0) |
#define BUILD_BUG() (0) |
#else /* __CHECKER__ */ |
#define WARN_ON(condition) ({ \ |
int __ret_warn_on = !!(condition); \ |
if (unlikely(__ret_warn_on)) \ |
__WARN(); \ |
unlikely(__ret_warn_on); \ |
}) |
#define WARN_ONCE(condition, format...) ({ \ |
static bool __warned; \ |
int __ret_warn_once = !!(condition); \ |
\ |
if (unlikely(__ret_warn_once)) \ |
if (WARN(!__warned, format)) \ |
__warned = true; \ |
unlikely(__ret_warn_once); \ |
}) |
#define WARN_ON_ONCE(condition) ({ \ |
static bool __warned; \ |
int __ret_warn_once = !!(condition); \ |
\ |
if (unlikely(__ret_warn_once)) \ |
if (WARN_ON(!__warned)) \ |
__warned = true; \ |
unlikely(__ret_warn_once); \ |
}) |
#define BUG() do { \ |
printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \ |
} while (0) |
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0) |
/* Force a compilation error if a constant expression is not a power of 2 */ |
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ |
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) |
113,10 → 83,30 |
*/ |
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") |
#endif /* __CHECKER__ */ |
#ifdef CONFIG_GENERIC_BUG |
#include <asm-generic/bug.h> |
static inline int is_warning_bug(const struct bug_entry *bug) |
{ |
return bug->flags & BUGFLAG_WARNING; |
} |
#define pr_warn_once(fmt, ...) \ |
printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) |
const struct bug_entry *find_bug(unsigned long bugaddr); |
#endif |
enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); |
/* These are defined by the architecture */ |
int is_valid_bugaddr(unsigned long addr); |
#else /* !CONFIG_GENERIC_BUG */ |
static inline enum bug_trap_type report_bug(unsigned long bug_addr, |
struct pt_regs *regs) |
{ |
return BUG_TRAP_TYPE_BUG; |
} |
#endif /* CONFIG_GENERIC_BUG */ |
#endif /* _LINUX_BUG_H */ |
/drivers/include/linux/byteorder/little_endian.h |
---|
1,108 → 1,7 |
#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H |
#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H |
#ifndef __LITTLE_ENDIAN |
#define __LITTLE_ENDIAN 1234 |
#endif |
#ifndef __LITTLE_ENDIAN_BITFIELD |
#define __LITTLE_ENDIAN_BITFIELD |
#endif |
#include <uapi/linux/byteorder/little_endian.h> |
#include <linux/types.h> |
#include <linux/swab.h> |
#define __constant_htonl(x) ((__force __be32)___constant_swab32((x))) |
#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x)) |
#define __constant_htons(x) ((__force __be16)___constant_swab16((x))) |
#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x)) |
#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x)) |
#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x)) |
#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x)) |
#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x)) |
#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x)) |
#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x)) |
#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x))) |
#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x)) |
#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x))) |
#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x)) |
#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x))) |
#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x)) |
#define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) |
#define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) |
#define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) |
#define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) |
#define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) |
#define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) |
#define __cpu_to_be64(x) ((__force __be64)__swab64((x))) |
#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) |
#define __cpu_to_be32(x) ((__force __be32)__swab32((x))) |
#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) |
#define __cpu_to_be16(x) ((__force __be16)__swab16((x))) |
#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) |
static inline __le64 __cpu_to_le64p(const __u64 *p) |
{ |
return (__force __le64)*p; |
} |
static inline __u64 __le64_to_cpup(const __le64 *p) |
{ |
return (__force __u64)*p; |
} |
static inline __le32 __cpu_to_le32p(const __u32 *p) |
{ |
return (__force __le32)*p; |
} |
static inline __u32 __le32_to_cpup(const __le32 *p) |
{ |
return (__force __u32)*p; |
} |
static inline __le16 __cpu_to_le16p(const __u16 *p) |
{ |
return (__force __le16)*p; |
} |
static inline __u16 __le16_to_cpup(const __le16 *p) |
{ |
return (__force __u16)*p; |
} |
static inline __be64 __cpu_to_be64p(const __u64 *p) |
{ |
return (__force __be64)__swab64p(p); |
} |
static inline __u64 __be64_to_cpup(const __be64 *p) |
{ |
return __swab64p((__u64 *)p); |
} |
static inline __be32 __cpu_to_be32p(const __u32 *p) |
{ |
return (__force __be32)__swab32p(p); |
} |
static inline __u32 __be32_to_cpup(const __be32 *p) |
{ |
return __swab32p((__u32 *)p); |
} |
static inline __be16 __cpu_to_be16p(const __u16 *p) |
{ |
return (__force __be16)__swab16p(p); |
} |
static inline __u16 __be16_to_cpup(const __be16 *p) |
{ |
return __swab16p((__u16 *)p); |
} |
#define __cpu_to_le64s(x) do { (void)(x); } while (0) |
#define __le64_to_cpus(x) do { (void)(x); } while (0) |
#define __cpu_to_le32s(x) do { (void)(x); } while (0) |
#define __le32_to_cpus(x) do { (void)(x); } while (0) |
#define __cpu_to_le16s(x) do { (void)(x); } while (0) |
#define __le16_to_cpus(x) do { (void)(x); } while (0) |
#define __cpu_to_be64s(x) __swab64s((x)) |
#define __be64_to_cpus(x) __swab64s((x)) |
#define __cpu_to_be32s(x) __swab32s((x)) |
#define __be32_to_cpus(x) __swab32s((x)) |
#define __cpu_to_be16s(x) __swab16s((x)) |
#define __be16_to_cpus(x) __swab16s((x)) |
#ifdef __KERNEL__ |
#include <linux/byteorder/generic.h> |
#endif |
#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */ |
/drivers/include/linux/clocksource.h |
---|
62,12 → 62,18 |
* @suspend: suspend function for the clocksource, if necessary |
* @resume: resume function for the clocksource, if necessary |
* @owner: module reference, must be set by clocksource in modules |
* |
* Note: This struct is not used in hotpathes of the timekeeping code |
* because the timekeeper caches the hot path fields in its own data |
* structure, so no line cache alignment is required, |
* |
* The pointer to the clocksource itself is handed to the read |
* callback. If you need extra information there you can wrap struct |
* clocksource into your own struct. Depending on the amount of |
* information you need you should consider to cache line align that |
* structure. |
*/ |
struct clocksource { |
/* |
* Hotpath data, fits in a single cache line when the |
* clocksource itself is cacheline aligned. |
*/ |
cycle_t (*read)(struct clocksource *cs); |
cycle_t mask; |
u32 mult; |
95,7 → 101,7 |
cycle_t wd_last; |
#endif |
struct module *owner; |
} ____cacheline_aligned; |
}; |
/* |
* Clock source flags bits:: |
/drivers/include/linux/compiler-gcc.h |
---|
251,9 → 251,7 |
#endif |
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ |
#if GCC_VERSION >= 70000 |
#define KASAN_ABI_VERSION 5 |
#elif GCC_VERSION >= 50000 |
#if GCC_VERSION >= 50000 |
#define KASAN_ABI_VERSION 4 |
#elif GCC_VERSION >= 40902 |
#define KASAN_ABI_VERSION 3 |
/drivers/include/linux/compiler.h |
---|
299,6 → 299,23 |
__u.__val; \ |
}) |
/** |
* smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering |
* @cond: boolean expression to wait for |
* |
* Equivalent to using smp_load_acquire() on the condition variable but employs |
* the control dependency of the wait to reduce the barrier on many platforms. |
* |
* The control dependency provides a LOAD->STORE order, the additional RMB |
* provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order, |
* aka. ACQUIRE. |
*/ |
#define smp_cond_acquire(cond) do { \ |
while (!(cond)) \ |
cpu_relax(); \ |
smp_rmb(); /* ctrl + rmb := acquire */ \ |
} while (0) |
#endif /* __KERNEL__ */ |
#endif /* __ASSEMBLY__ */ |
/drivers/include/linux/component.h |
---|
1,39 → 1,48 |
#ifndef COMPONENT_H |
#define COMPONENT_H |
#include <linux/stddef.h> |
struct device; |
struct component_ops { |
int (*bind)(struct device *, struct device *, void *); |
void (*unbind)(struct device *, struct device *, void *); |
int (*bind)(struct device *comp, struct device *master, |
void *master_data); |
void (*unbind)(struct device *comp, struct device *master, |
void *master_data); |
}; |
int component_add(struct device *, const struct component_ops *); |
void component_del(struct device *, const struct component_ops *); |
int component_bind_all(struct device *, void *); |
void component_unbind_all(struct device *, void *); |
int component_bind_all(struct device *master, void *master_data); |
void component_unbind_all(struct device *master, void *master_data); |
struct master; |
struct component_master_ops { |
int (*add_components)(struct device *, struct master *); |
int (*bind)(struct device *); |
void (*unbind)(struct device *); |
int (*bind)(struct device *master); |
void (*unbind)(struct device *master); |
}; |
int component_master_add(struct device *, const struct component_master_ops *); |
void component_master_del(struct device *, |
const struct component_master_ops *); |
int component_master_add_child(struct master *master, |
int (*compare)(struct device *, void *), void *compare_data); |
struct component_match; |
int component_master_add_with_match(struct device *, |
const struct component_master_ops *, struct component_match *); |
void component_match_add(struct device *, struct component_match **, |
void component_match_add_release(struct device *master, |
struct component_match **matchptr, |
void (*release)(struct device *, void *), |
int (*compare)(struct device *, void *), void *compare_data); |
static inline void component_match_add(struct device *master, |
struct component_match **matchptr, |
int (*compare)(struct device *, void *), void *compare_data) |
{ |
component_match_add_release(master, matchptr, NULL, compare, |
compare_data); |
} |
#endif |
/drivers/include/linux/cpumask.h |
---|
85,10 → 85,14 |
* only one CPU. |
*/ |
extern const struct cpumask *const cpu_possible_mask; |
extern const struct cpumask *const cpu_online_mask; |
extern const struct cpumask *const cpu_present_mask; |
extern const struct cpumask *const cpu_active_mask; |
extern struct cpumask __cpu_possible_mask; |
extern struct cpumask __cpu_online_mask; |
extern struct cpumask __cpu_present_mask; |
extern struct cpumask __cpu_active_mask; |
#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask) |
#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask) |
#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask) |
#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask) |
#if NR_CPUS > 1 |
#define num_online_cpus() cpumask_weight(cpu_online_mask) |
556,7 → 560,7 |
static inline int cpumask_parse_user(const char __user *buf, int len, |
struct cpumask *dstp) |
{ |
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits); |
return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids); |
} |
/** |
571,7 → 575,7 |
struct cpumask *dstp) |
{ |
return bitmap_parselist_user(buf, len, cpumask_bits(dstp), |
nr_cpumask_bits); |
nr_cpu_ids); |
} |
/** |
586,7 → 590,7 |
char *nl = strchr(buf, '\n'); |
unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf); |
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits); |
return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids); |
} |
/** |
598,7 → 602,7 |
*/ |
static inline int cpulist_parse(const char *buf, struct cpumask *dstp) |
{ |
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits); |
return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids); |
} |
/** |
716,14 → 720,49 |
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask) |
/* Wrappers for arch boot code to manipulate normally-constant masks */ |
void set_cpu_possible(unsigned int cpu, bool possible); |
void set_cpu_present(unsigned int cpu, bool present); |
void set_cpu_online(unsigned int cpu, bool online); |
void set_cpu_active(unsigned int cpu, bool active); |
void init_cpu_present(const struct cpumask *src); |
void init_cpu_possible(const struct cpumask *src); |
void init_cpu_online(const struct cpumask *src); |
static inline void |
set_cpu_possible(unsigned int cpu, bool possible) |
{ |
if (possible) |
cpumask_set_cpu(cpu, &__cpu_possible_mask); |
else |
cpumask_clear_cpu(cpu, &__cpu_possible_mask); |
} |
static inline void |
set_cpu_present(unsigned int cpu, bool present) |
{ |
if (present) |
cpumask_set_cpu(cpu, &__cpu_present_mask); |
else |
cpumask_clear_cpu(cpu, &__cpu_present_mask); |
} |
static inline void |
set_cpu_online(unsigned int cpu, bool online) |
{ |
if (online) { |
cpumask_set_cpu(cpu, &__cpu_online_mask); |
cpumask_set_cpu(cpu, &__cpu_active_mask); |
} else { |
cpumask_clear_cpu(cpu, &__cpu_online_mask); |
} |
} |
static inline void |
set_cpu_active(unsigned int cpu, bool active) |
{ |
if (active) |
cpumask_set_cpu(cpu, &__cpu_active_mask); |
else |
cpumask_clear_cpu(cpu, &__cpu_active_mask); |
} |
/** |
* to_cpumask - convert an NR_CPUS bitmap to a struct cpumask * |
* @bitmap: the bitmap |
/drivers/include/linux/dma-attrs.h |
---|
41,7 → 41,6 |
bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS); |
} |
#ifdef CONFIG_HAVE_DMA_ATTRS |
/** |
* dma_set_attr - set a specific attribute |
* @attr: attribute to set |
67,14 → 66,5 |
BUG_ON(attr >= DMA_ATTR_MAX); |
return test_bit(attr, attrs->flags); |
} |
#else /* !CONFIG_HAVE_DMA_ATTRS */ |
static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs) |
{ |
} |
static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) |
{ |
return 0; |
} |
#endif /* CONFIG_HAVE_DMA_ATTRS */ |
#endif /* _DMA_ATTR_H */ |
/drivers/include/linux/dma-mapping.h |
---|
8,6 → 8,7 |
#include <linux/dma-attrs.h> |
#include <linux/dma-direction.h> |
#include <linux/scatterlist.h> |
#include <linux/bug.h> |
extern void * |
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, |
/drivers/include/linux/dmi.h |
---|
22,6 → 22,7 |
DMI_DEV_TYPE_IPMI = -1, |
DMI_DEV_TYPE_OEM_STRING = -2, |
DMI_DEV_TYPE_DEV_ONBOARD = -3, |
DMI_DEV_TYPE_DEV_SLOT = -4, |
}; |
enum dmi_entry_type { |
/drivers/include/linux/err.h |
---|
37,7 → 37,7 |
static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr) |
{ |
return !ptr || IS_ERR_VALUE((unsigned long)ptr); |
return unlikely(!ptr) || IS_ERR_VALUE((unsigned long)ptr); |
} |
/** |
/drivers/include/linux/fb.h |
---|
1,413 → 1,19 |
#ifndef _LINUX_FB_H |
#define _LINUX_FB_H |
#include <linux/types.h> |
#include <linux/i2c.h> |
#include <linux/kgdb.h> |
#include <uapi/linux/fb.h> |
struct dentry; |
/* Definitions of frame buffers */ |
#define FB_MAX 32 /* sufficient for now */ |
/* ioctls |
0x46 is 'F' */ |
#define FBIOGET_VSCREENINFO 0x4600 |
#define FBIOPUT_VSCREENINFO 0x4601 |
#define FBIOGET_FSCREENINFO 0x4602 |
#define FBIOGETCMAP 0x4604 |
#define FBIOPUTCMAP 0x4605 |
#define FBIOPAN_DISPLAY 0x4606 |
#ifdef __KERNEL__ |
#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor_user) |
#else |
#define FBIO_CURSOR _IOWR('F', 0x08, struct fb_cursor) |
#endif |
/* 0x4607-0x460B are defined below */ |
/* #define FBIOGET_MONITORSPEC 0x460C */ |
/* #define FBIOPUT_MONITORSPEC 0x460D */ |
/* #define FBIOSWITCH_MONIBIT 0x460E */ |
#define FBIOGET_CON2FBMAP 0x460F |
#define FBIOPUT_CON2FBMAP 0x4610 |
#define FBIOBLANK 0x4611 /* arg: 0 or vesa level + 1 */ |
#define FBIOGET_VBLANK _IOR('F', 0x12, struct fb_vblank) |
#define FBIO_ALLOC 0x4613 |
#define FBIO_FREE 0x4614 |
#define FBIOGET_GLYPH 0x4615 |
#define FBIOGET_HWCINFO 0x4616 |
#define FBIOPUT_MODEINFO 0x4617 |
#define FBIOGET_DISPINFO 0x4618 |
#define FBIO_WAITFORVSYNC _IOW('F', 0x20, __u32) |
#define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */ |
#define FB_TYPE_PLANES 1 /* Non interleaved planes */ |
#define FB_TYPE_INTERLEAVED_PLANES 2 /* Interleaved planes */ |
#define FB_TYPE_TEXT 3 /* Text/attributes */ |
#define FB_TYPE_VGA_PLANES 4 /* EGA/VGA planes */ |
#define FB_AUX_TEXT_MDA 0 /* Monochrome text */ |
#define FB_AUX_TEXT_CGA 1 /* CGA/EGA/VGA Color text */ |
#define FB_AUX_TEXT_S3_MMIO 2 /* S3 MMIO fasttext */ |
#define FB_AUX_TEXT_MGA_STEP16 3 /* MGA Millenium I: text, attr, 14 reserved bytes */ |
#define FB_AUX_TEXT_MGA_STEP8 4 /* other MGAs: text, attr, 6 reserved bytes */ |
#define FB_AUX_TEXT_SVGA_GROUP 8 /* 8-15: SVGA tileblit compatible modes */ |
#define FB_AUX_TEXT_SVGA_MASK 7 /* lower three bits says step */ |
#define FB_AUX_TEXT_SVGA_STEP2 8 /* SVGA text mode: text, attr */ |
#define FB_AUX_TEXT_SVGA_STEP4 9 /* SVGA text mode: text, attr, 2 reserved bytes */ |
#define FB_AUX_TEXT_SVGA_STEP8 10 /* SVGA text mode: text, attr, 6 reserved bytes */ |
#define FB_AUX_TEXT_SVGA_STEP16 11 /* SVGA text mode: text, attr, 14 reserved bytes */ |
#define FB_AUX_TEXT_SVGA_LAST 15 /* reserved up to 15 */ |
#define FB_AUX_VGA_PLANES_VGA4 0 /* 16 color planes (EGA/VGA) */ |
#define FB_AUX_VGA_PLANES_CFB4 1 /* CFB4 in planes (VGA) */ |
#define FB_AUX_VGA_PLANES_CFB8 2 /* CFB8 in planes (VGA) */ |
#define FB_VISUAL_MONO01 0 /* Monochr. 1=Black 0=White */ |
#define FB_VISUAL_MONO10 1 /* Monochr. 1=White 0=Black */ |
#define FB_VISUAL_TRUECOLOR 2 /* True color */ |
#define FB_VISUAL_PSEUDOCOLOR 3 /* Pseudo color (like atari) */ |
#define FB_VISUAL_DIRECTCOLOR 4 /* Direct color */ |
#define FB_VISUAL_STATIC_PSEUDOCOLOR 5 /* Pseudo color readonly */ |
#define FB_ACCEL_NONE 0 /* no hardware accelerator */ |
#define FB_ACCEL_ATARIBLITT 1 /* Atari Blitter */ |
#define FB_ACCEL_AMIGABLITT 2 /* Amiga Blitter */ |
#define FB_ACCEL_S3_TRIO64 3 /* Cybervision64 (S3 Trio64) */ |
#define FB_ACCEL_NCR_77C32BLT 4 /* RetinaZ3 (NCR 77C32BLT) */ |
#define FB_ACCEL_S3_VIRGE 5 /* Cybervision64/3D (S3 ViRGE) */ |
#define FB_ACCEL_ATI_MACH64GX 6 /* ATI Mach 64GX family */ |
#define FB_ACCEL_DEC_TGA 7 /* DEC 21030 TGA */ |
#define FB_ACCEL_ATI_MACH64CT 8 /* ATI Mach 64CT family */ |
#define FB_ACCEL_ATI_MACH64VT 9 /* ATI Mach 64CT family VT class */ |
#define FB_ACCEL_ATI_MACH64GT 10 /* ATI Mach 64CT family GT class */ |
#define FB_ACCEL_SUN_CREATOR 11 /* Sun Creator/Creator3D */ |
#define FB_ACCEL_SUN_CGSIX 12 /* Sun cg6 */ |
#define FB_ACCEL_SUN_LEO 13 /* Sun leo/zx */ |
#define FB_ACCEL_IMS_TWINTURBO 14 /* IMS Twin Turbo */ |
#define FB_ACCEL_3DLABS_PERMEDIA2 15 /* 3Dlabs Permedia 2 */ |
#define FB_ACCEL_MATROX_MGA2064W 16 /* Matrox MGA2064W (Millenium) */ |
#define FB_ACCEL_MATROX_MGA1064SG 17 /* Matrox MGA1064SG (Mystique) */ |
#define FB_ACCEL_MATROX_MGA2164W 18 /* Matrox MGA2164W (Millenium II) */ |
#define FB_ACCEL_MATROX_MGA2164W_AGP 19 /* Matrox MGA2164W (Millenium II) */ |
#define FB_ACCEL_MATROX_MGAG100 20 /* Matrox G100 (Productiva G100) */ |
#define FB_ACCEL_MATROX_MGAG200 21 /* Matrox G200 (Myst, Mill, ...) */ |
#define FB_ACCEL_SUN_CG14 22 /* Sun cgfourteen */ |
#define FB_ACCEL_SUN_BWTWO 23 /* Sun bwtwo */ |
#define FB_ACCEL_SUN_CGTHREE 24 /* Sun cgthree */ |
#define FB_ACCEL_SUN_TCX 25 /* Sun tcx */ |
#define FB_ACCEL_MATROX_MGAG400 26 /* Matrox G400 */ |
#define FB_ACCEL_NV3 27 /* nVidia RIVA 128 */ |
#define FB_ACCEL_NV4 28 /* nVidia RIVA TNT */ |
#define FB_ACCEL_NV5 29 /* nVidia RIVA TNT2 */ |
#define FB_ACCEL_CT_6555x 30 /* C&T 6555x */ |
#define FB_ACCEL_3DFX_BANSHEE 31 /* 3Dfx Banshee */ |
#define FB_ACCEL_ATI_RAGE128 32 /* ATI Rage128 family */ |
#define FB_ACCEL_IGS_CYBER2000 33 /* CyberPro 2000 */ |
#define FB_ACCEL_IGS_CYBER2010 34 /* CyberPro 2010 */ |
#define FB_ACCEL_IGS_CYBER5000 35 /* CyberPro 5000 */ |
#define FB_ACCEL_SIS_GLAMOUR 36 /* SiS 300/630/540 */ |
#define FB_ACCEL_3DLABS_PERMEDIA3 37 /* 3Dlabs Permedia 3 */ |
#define FB_ACCEL_ATI_RADEON 38 /* ATI Radeon family */ |
#define FB_ACCEL_I810 39 /* Intel 810/815 */ |
#define FB_ACCEL_SIS_GLAMOUR_2 40 /* SiS 315, 650, 740 */ |
#define FB_ACCEL_SIS_XABRE 41 /* SiS 330 ("Xabre") */ |
#define FB_ACCEL_I830 42 /* Intel 830M/845G/85x/865G */ |
#define FB_ACCEL_NV_10 43 /* nVidia Arch 10 */ |
#define FB_ACCEL_NV_20 44 /* nVidia Arch 20 */ |
#define FB_ACCEL_NV_30 45 /* nVidia Arch 30 */ |
#define FB_ACCEL_NV_40 46 /* nVidia Arch 40 */ |
#define FB_ACCEL_XGI_VOLARI_V 47 /* XGI Volari V3XT, V5, V8 */ |
#define FB_ACCEL_XGI_VOLARI_Z 48 /* XGI Volari Z7 */ |
#define FB_ACCEL_OMAP1610 49 /* TI OMAP16xx */ |
#define FB_ACCEL_TRIDENT_TGUI 50 /* Trident TGUI */ |
#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */ |
#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ |
#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ |
#define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */ |
#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ |
#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ |
#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ |
#define FB_ACCEL_NEOMAGIC_NM2097 93 /* NeoMagic NM2097 */ |
#define FB_ACCEL_NEOMAGIC_NM2160 94 /* NeoMagic NM2160 */ |
#define FB_ACCEL_NEOMAGIC_NM2200 95 /* NeoMagic NM2200 */ |
#define FB_ACCEL_NEOMAGIC_NM2230 96 /* NeoMagic NM2230 */ |
#define FB_ACCEL_NEOMAGIC_NM2360 97 /* NeoMagic NM2360 */ |
#define FB_ACCEL_NEOMAGIC_NM2380 98 /* NeoMagic NM2380 */ |
#define FB_ACCEL_PXA3XX 99 /* PXA3xx */ |
#define FB_ACCEL_SAVAGE4 0x80 /* S3 Savage4 */ |
#define FB_ACCEL_SAVAGE3D 0x81 /* S3 Savage3D */ |
#define FB_ACCEL_SAVAGE3D_MV 0x82 /* S3 Savage3D-MV */ |
#define FB_ACCEL_SAVAGE2000 0x83 /* S3 Savage2000 */ |
#define FB_ACCEL_SAVAGE_MX_MV 0x84 /* S3 Savage/MX-MV */ |
#define FB_ACCEL_SAVAGE_MX 0x85 /* S3 Savage/MX */ |
#define FB_ACCEL_SAVAGE_IX_MV 0x86 /* S3 Savage/IX-MV */ |
#define FB_ACCEL_SAVAGE_IX 0x87 /* S3 Savage/IX */ |
#define FB_ACCEL_PROSAVAGE_PM 0x88 /* S3 ProSavage PM133 */ |
#define FB_ACCEL_PROSAVAGE_KM 0x89 /* S3 ProSavage KM133 */ |
#define FB_ACCEL_S3TWISTER_P 0x8a /* S3 Twister */ |
#define FB_ACCEL_S3TWISTER_K 0x8b /* S3 TwisterK */ |
#define FB_ACCEL_SUPERSAVAGE 0x8c /* S3 Supersavage */ |
#define FB_ACCEL_PROSAVAGE_DDR 0x8d /* S3 ProSavage DDR */ |
#define FB_ACCEL_PROSAVAGE_DDRK 0x8e /* S3 ProSavage DDR-K */ |
#define FB_ACCEL_PUV3_UNIGFX 0xa0 /* PKUnity-v3 Unigfx */ |
struct fb_fix_screeninfo { |
char id[16]; /* identification string eg "TT Builtin" */ |
unsigned long smem_start; /* Start of frame buffer mem */ |
/* (physical address) */ |
__u32 smem_len; /* Length of frame buffer mem */ |
__u32 type; /* see FB_TYPE_* */ |
__u32 type_aux; /* Interleave for interleaved Planes */ |
__u32 visual; /* see FB_VISUAL_* */ |
__u16 xpanstep; /* zero if no hardware panning */ |
__u16 ypanstep; /* zero if no hardware panning */ |
__u16 ywrapstep; /* zero if no hardware ywrap */ |
__u32 line_length; /* length of a line in bytes */ |
unsigned long mmio_start; /* Start of Memory Mapped I/O */ |
/* (physical address) */ |
__u32 mmio_len; /* Length of Memory Mapped I/O */ |
__u32 accel; /* Indicate to driver which */ |
/* specific chip/card we have */ |
__u16 reserved[3]; /* Reserved for future compatibility */ |
}; |
/* Interpretation of offset for color fields: All offsets are from the right, |
* inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you |
* can use the offset as right argument to <<). A pixel afterwards is a bit |
* stream and is written to video memory as that unmodified. |
* |
* For pseudocolor: offset and length should be the same for all color |
* components. Offset specifies the position of the least significant bit |
* of the pallette index in a pixel value. Length indicates the number |
* of available palette entries (i.e. # of entries = 1 << length). |
*/ |
struct fb_bitfield { |
__u32 offset; /* beginning of bitfield */ |
__u32 length; /* length of bitfield */ |
__u32 msb_right; /* != 0 : Most significant bit is */ |
/* right */ |
}; |
#define FB_NONSTD_HAM 1 /* Hold-And-Modify (HAM) */ |
#define FB_NONSTD_REV_PIX_IN_B 2 /* order of pixels in each byte is reversed */ |
#define FB_ACTIVATE_NOW 0 /* set values immediately (or vbl)*/ |
#define FB_ACTIVATE_NXTOPEN 1 /* activate on next open */ |
#define FB_ACTIVATE_TEST 2 /* don't set, round up impossible */ |
#define FB_ACTIVATE_MASK 15 |
/* values */ |
#define FB_ACTIVATE_VBL 16 /* activate values on next vbl */ |
#define FB_CHANGE_CMAP_VBL 32 /* change colormap on vbl */ |
#define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */ |
#define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/ |
#define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */ |
#define FB_ACCELF_TEXT 1 /* (OBSOLETE) see fb_info.flags and vc_mode */ |
#define FB_SYNC_HOR_HIGH_ACT 1 /* horizontal sync high active */ |
#define FB_SYNC_VERT_HIGH_ACT 2 /* vertical sync high active */ |
#define FB_SYNC_EXT 4 /* external sync */ |
#define FB_SYNC_COMP_HIGH_ACT 8 /* composite sync high active */ |
#define FB_SYNC_BROADCAST 16 /* broadcast video timings */ |
/* vtotal = 144d/288n/576i => PAL */ |
/* vtotal = 121d/242n/484i => NTSC */ |
#define FB_SYNC_ON_GREEN 32 /* sync on green */ |
#define FB_VMODE_NONINTERLACED 0 /* non interlaced */ |
#define FB_VMODE_INTERLACED 1 /* interlaced */ |
#define FB_VMODE_DOUBLE 2 /* double scan */ |
#define FB_VMODE_ODD_FLD_FIRST 4 /* interlaced: top line first */ |
#define FB_VMODE_MASK 255 |
#define FB_VMODE_YWRAP 256 /* ywrap instead of panning */ |
#define FB_VMODE_SMOOTH_XPAN 512 /* smooth xpan possible (internally used) */ |
#define FB_VMODE_CONUPDATE 512 /* don't update x/yoffset */ |
/* |
* Display rotation support |
*/ |
#define FB_ROTATE_UR 0 |
#define FB_ROTATE_CW 1 |
#define FB_ROTATE_UD 2 |
#define FB_ROTATE_CCW 3 |
#define PICOS2KHZ(a) (1000000000UL/(a)) |
#define KHZ2PICOS(a) (1000000000UL/(a)) |
struct fb_var_screeninfo { |
__u32 xres; /* visible resolution */ |
__u32 yres; |
__u32 xres_virtual; /* virtual resolution */ |
__u32 yres_virtual; |
__u32 xoffset; /* offset from virtual to visible */ |
__u32 yoffset; /* resolution */ |
__u32 bits_per_pixel; /* guess what */ |
__u32 grayscale; /* != 0 Graylevels instead of colors */ |
struct fb_bitfield red; /* bitfield in fb mem if true color, */ |
struct fb_bitfield green; /* else only length is significant */ |
struct fb_bitfield blue; |
struct fb_bitfield transp; /* transparency */ |
__u32 nonstd; /* != 0 Non standard pixel format */ |
__u32 activate; /* see FB_ACTIVATE_* */ |
__u32 height; /* height of picture in mm */ |
__u32 width; /* width of picture in mm */ |
__u32 accel_flags; /* (OBSOLETE) see fb_info.flags */ |
/* Timing: All values in pixclocks, except pixclock (of course) */ |
__u32 pixclock; /* pixel clock in ps (pico seconds) */ |
__u32 left_margin; /* time from sync to picture */ |
__u32 right_margin; /* time from picture to sync */ |
__u32 upper_margin; /* time from sync to picture */ |
__u32 lower_margin; |
__u32 hsync_len; /* length of horizontal sync */ |
__u32 vsync_len; /* length of vertical sync */ |
__u32 sync; /* see FB_SYNC_* */ |
__u32 vmode; /* see FB_VMODE_* */ |
__u32 rotate; /* angle we rotate counter clockwise */ |
__u32 reserved[5]; /* Reserved for future compatibility */ |
}; |
struct fb_cmap { |
__u32 start; /* First entry */ |
__u32 len; /* Number of entries */ |
__u16 *red; /* Red values */ |
__u16 *green; |
__u16 *blue; |
__u16 *transp; /* transparency, can be NULL */ |
}; |
struct fb_con2fbmap { |
__u32 console; |
__u32 framebuffer; |
}; |
/* VESA Blanking Levels */ |
#define VESA_NO_BLANKING 0 |
#define VESA_VSYNC_SUSPEND 1 |
#define VESA_HSYNC_SUSPEND 2 |
#define VESA_POWERDOWN 3 |
enum { |
/* screen: unblanked, hsync: on, vsync: on */ |
FB_BLANK_UNBLANK = VESA_NO_BLANKING, |
/* screen: blanked, hsync: on, vsync: on */ |
FB_BLANK_NORMAL = VESA_NO_BLANKING + 1, |
/* screen: blanked, hsync: on, vsync: off */ |
FB_BLANK_VSYNC_SUSPEND = VESA_VSYNC_SUSPEND + 1, |
/* screen: blanked, hsync: off, vsync: on */ |
FB_BLANK_HSYNC_SUSPEND = VESA_HSYNC_SUSPEND + 1, |
/* screen: blanked, hsync: off, vsync: off */ |
FB_BLANK_POWERDOWN = VESA_POWERDOWN + 1 |
}; |
#define FB_VBLANK_VBLANKING 0x001 /* currently in a vertical blank */ |
#define FB_VBLANK_HBLANKING 0x002 /* currently in a horizontal blank */ |
#define FB_VBLANK_HAVE_VBLANK 0x004 /* vertical blanks can be detected */ |
#define FB_VBLANK_HAVE_HBLANK 0x008 /* horizontal blanks can be detected */ |
#define FB_VBLANK_HAVE_COUNT 0x010 /* global retrace counter is available */ |
#define FB_VBLANK_HAVE_VCOUNT 0x020 /* the vcount field is valid */ |
#define FB_VBLANK_HAVE_HCOUNT 0x040 /* the hcount field is valid */ |
#define FB_VBLANK_VSYNCING 0x080 /* currently in a vsync */ |
#define FB_VBLANK_HAVE_VSYNC 0x100 /* verical syncs can be detected */ |
struct fb_vblank { |
__u32 flags; /* FB_VBLANK flags */ |
__u32 count; /* counter of retraces since boot */ |
__u32 vcount; /* current scanline position */ |
__u32 hcount; /* current scandot position */ |
__u32 reserved[4]; /* reserved for future compatibility */ |
}; |
/* Internal HW accel */ |
#define ROP_COPY 0 |
#define ROP_XOR 1 |
struct fb_copyarea { |
__u32 dx; |
__u32 dy; |
__u32 width; |
__u32 height; |
__u32 sx; |
__u32 sy; |
}; |
struct fb_fillrect { |
__u32 dx; /* screen-relative */ |
__u32 dy; |
__u32 width; |
__u32 height; |
__u32 color; |
__u32 rop; |
}; |
struct fb_image { |
__u32 dx; /* Where to place image */ |
__u32 dy; |
__u32 width; /* Size of image */ |
__u32 height; |
__u32 fg_color; /* Only used when a mono bitmap */ |
__u32 bg_color; |
__u8 depth; /* Depth of the image */ |
const char *data; /* Pointer to image data */ |
struct fb_cmap cmap; /* color map info */ |
}; |
/* |
* hardware cursor control |
*/ |
#define FB_CUR_SETIMAGE 0x01 |
#define FB_CUR_SETPOS 0x02 |
#define FB_CUR_SETHOT 0x04 |
#define FB_CUR_SETCMAP 0x08 |
#define FB_CUR_SETSHAPE 0x10 |
#define FB_CUR_SETSIZE 0x20 |
#define FB_CUR_SETALL 0xFF |
struct fbcurpos { |
__u16 x, y; |
}; |
struct fb_cursor { |
__u16 set; /* what to set */ |
__u16 enable; /* cursor on/off */ |
__u16 rop; /* bitop operation */ |
const char *mask; /* cursor mask bits */ |
struct fbcurpos hot; /* cursor hot spot */ |
struct fb_image image; /* Cursor image */ |
}; |
#ifdef CONFIG_FB_BACKLIGHT |
/* Settings for the generic backlight code */ |
#define FB_BACKLIGHT_LEVELS 128 |
#define FB_BACKLIGHT_MAX 0xFF |
#endif |
//#ifdef __KERNEL__ |
//#include <linux/fs.h> |
//#include <linux/init.h> |
//#include <linux/device.h> |
//#include <linux/workqueue.h> |
//#include <linux/notifier.h> |
#include <linux/fs.h> |
#include <linux/init.h> |
#include <linux/workqueue.h> |
#include <linux/notifier.h> |
#include <linux/list.h> |
#include <linux/mutex.h> |
//#include <linux/backlight.h> |
#include <linux/backlight.h> |
#include <linux/slab.h> |
//#include <asm/io.h> |
#include <asm/io.h> |
struct vm_area_struct; |
struct fb_info; |
569,7 → 175,27 |
u32 flags; |
}; |
#ifdef CONFIG_FB_NOTIFY |
extern int fb_register_client(struct notifier_block *nb); |
extern int fb_unregister_client(struct notifier_block *nb); |
extern int fb_notifier_call_chain(unsigned long val, void *v); |
#else |
static inline int fb_register_client(struct notifier_block *nb) |
{ |
return 0; |
}; |
static inline int fb_unregister_client(struct notifier_block *nb) |
{ |
return 0; |
}; |
static inline int fb_notifier_call_chain(unsigned long val, void *v) |
{ |
return 0; |
}; |
#endif |
/* |
* Pixmap structure definition |
* |
1050,6 → 676,13 |
} |
/* drivers/video/fb_defio.c */ |
extern void fb_deferred_io_init(struct fb_info *info); |
extern void fb_deferred_io_open(struct fb_info *info, |
struct inode *inode, |
struct file *file); |
extern void fb_deferred_io_cleanup(struct fb_info *info); |
extern int fb_deferred_io_fsync(struct file *file, loff_t start, |
loff_t end, int datasync); |
static inline bool fb_be_math(struct fb_info *info) |
{ |
/drivers/include/linux/fs.h |
---|
94,4 → 94,5 |
#define FL_UNLOCK_PENDING 512 /* Lease is being broken */ |
#define FL_OFDLCK 1024 /* lock is "owned" by struct file */ |
#define FL_LAYOUT 2048 /* outstanding pNFS layout */ |
struct inode; |
#endif /* _LINUX_FS_H */ |
/drivers/include/linux/gfp.h |
---|
2,7 → 2,7 |
#define __LINUX_GFP_H |
#include <linux/mmdebug.h> |
#include <linux/types.h> |
#include <linux/mmzone.h> |
#include <linux/stddef.h> |
#include <linux/linkage.h> |
29,7 → 29,7 |
#define ___GFP_HARDWALL 0x20000u |
#define ___GFP_THISNODE 0x40000u |
#define ___GFP_ATOMIC 0x80000u |
#define ___GFP_NOACCOUNT 0x100000u |
#define ___GFP_ACCOUNT 0x100000u |
#define ___GFP_NOTRACK 0x200000u |
#define ___GFP_DIRECT_RECLAIM 0x400000u |
#define ___GFP_OTHER_NODE 0x800000u |
72,11 → 72,15 |
* |
* __GFP_THISNODE forces the allocation to be satisified from the requested |
* node with no fallbacks or placement policy enforcements. |
* |
* __GFP_ACCOUNT causes the allocation to be accounted to kmemcg (only relevant |
* to kmem allocations). |
*/ |
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) |
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) |
#define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) |
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) |
#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) |
/* |
* Watermark modifiers -- controls access to emergency reserves |
103,7 → 107,6 |
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH) |
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC) |
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC) |
#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) |
/* |
* Reclaim modifiers |
196,6 → 199,9 |
* GFP_KERNEL is typical for kernel-internal allocations. The caller requires |
* ZONE_NORMAL or a lower zone for direct access but can direct reclaim. |
* |
* GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is |
* accounted to kmemcg. |
* |
* GFP_NOWAIT is for kernel allocations that should not stall for direct |
* reclaim, start physical IO or use any filesystem callback. |
* |
235,6 → 241,7 |
*/ |
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM) |
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS) |
#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT) |
#define GFP_NOWAIT (__GFP_KSWAPD_RECLAIM) |
#define GFP_NOIO (__GFP_RECLAIM) |
#define GFP_NOFS (__GFP_RECLAIM | __GFP_IO) |
249,16 → 256,9 |
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \ |
~__GFP_KSWAPD_RECLAIM) |
/* Convert GFP flags to their corresponding migrate type */ |
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE) |
#define GFP_MOVABLE_SHIFT 3 |
#undef GFP_MOVABLE_MASK |
#undef GFP_MOVABLE_SHIFT |
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) |
{ |
return (bool __force)(gfp_flags & __GFP_DIRECT_RECLAIM); |
return !!(gfp_flags & __GFP_DIRECT_RECLAIM); |
} |
#ifdef CONFIG_HIGHMEM |
/drivers/include/linux/hashtable.h |
---|
16,6 → 16,10 |
struct hlist_head name[1 << (bits)] = \ |
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } |
#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ |
struct hlist_head name[1 << (bits)] __read_mostly = \ |
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } |
#define DECLARE_HASHTABLE(name, bits) \ |
struct hlist_head name[1 << (bits)] |
/drivers/include/linux/i2c.h |
---|
30,6 → 30,7 |
#include <linux/device.h> /* for struct device */ |
#include <linux/sched.h> /* for completion */ |
#include <linux/mutex.h> |
#include <linux/swab.h> /* for swab16 */ |
#include <linux/jiffies.h> |
extern struct bus_type i2c_bus_type; |
/drivers/include/linux/idr.h |
---|
135,6 → 135,20 |
#define idr_for_each_entry(idp, entry, id) \ |
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) |
/** |
* idr_for_each_entry - continue iteration over an idr's elements of a given type |
* @idp: idr handle |
* @entry: the type * to use as cursor |
* @id: id entry's key |
* |
* Continue to iterate over list of given type, continuing after |
* the current position. |
*/ |
#define idr_for_each_entry_continue(idp, entry, id) \ |
for ((entry) = idr_get_next((idp), &(id)); \ |
entry; \ |
++id, (entry) = idr_get_next((idp), &(id))) |
/* |
* IDA - IDR based id allocator, use when translation from id to |
* pointer isn't necessary. |
/drivers/include/linux/interrupt.h |
---|
65,6 → 65,17 |
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) |
/* |
* These values can be returned by request_any_context_irq() and |
* describe the context the interrupt will be run in. |
* |
* IRQC_IS_HARDIRQ - interrupt runs in hardirq context |
* IRQC_IS_NESTED - interrupt runs in a nested threaded context |
*/ |
enum { |
IRQC_IS_HARDIRQ = 0, |
IRQC_IS_NESTED, |
}; |
extern int early_irq_init(void); |
extern int arch_probe_nr_irqs(void); |
extern int arch_early_irq_init(void); |
/drivers/include/linux/io.h |
---|
22,6 → 22,14 |
#include <linux/init.h> |
#include <linux/bug.h> |
#include <linux/err.h> |
#include <asm/io.h> |
struct device; |
struct resource; |
__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); |
void __ioread32_copy(void *to, const void __iomem *from, size_t count); |
void __iowrite64_copy(void __iomem *to, const void *from, size_t count); |
void *memremap(resource_size_t offset, size_t size, unsigned long flags); |
void memunmap(void *addr); |
#endif /* _LINUX_IO_H */ |
/drivers/include/linux/ioport.h |
---|
181,5 → 181,13 |
} |
/* Convenience shorthand with allocation */ |
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) |
#define request_muxed_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED) |
#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl) |
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0) |
#define request_mem_region_exclusive(start,n,name) \ |
__request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE) |
#define rename_region(region, newname) do { (region)->name = (newname); } while (0) |
#endif /* __ASSEMBLY__ */ |
#endif /* _LINUX_IOPORT_H */ |
/drivers/include/linux/jiffies.h |
---|
5,12 → 5,11 |
#include <linux/kernel.h> |
#include <linux/types.h> |
#include <linux/time.h> |
//#include <linux/timex.h> |
#include <linux/timex.h> |
//#include <asm/param.h> /* for HZ */ |
#define HZ 100 |
#define CLOCK_TICK_RATE 1193182ul |
/* |
* The following defines establish the engineering parameters of the PLL |
/drivers/include/linux/kernel.h |
---|
714,32 → 714,6 |
# define del_timer_sync(t) del_timer(t) |
#define build_mmio_read(name, size, type, reg, barrier) \ |
static inline type name(const volatile void __iomem *addr) \ |
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \ |
:"m" (*(volatile type __force *)addr) barrier); return ret; } |
#define build_mmio_write(name, size, type, reg, barrier) \ |
static inline void name(type val, volatile void __iomem *addr) \ |
{ asm volatile("mov" size " %0,%1": :reg (val), \ |
"m" (*(volatile type __force *)addr) barrier); } |
build_mmio_read(readb, "b", unsigned char, "=q", :"memory") |
build_mmio_read(readw, "w", unsigned short, "=r", :"memory") |
build_mmio_read(readl, "l", unsigned int, "=r", :"memory") |
build_mmio_read(__readb, "b", unsigned char, "=q", ) |
build_mmio_read(__readw, "w", unsigned short, "=r", ) |
build_mmio_read(__readl, "l", unsigned int, "=r", ) |
build_mmio_write(writeb, "b", unsigned char, "q", :"memory") |
build_mmio_write(writew, "w", unsigned short, "r", :"memory") |
build_mmio_write(writel, "l", unsigned int, "r", :"memory") |
build_mmio_write(__writeb, "b", unsigned char, "q", ) |
build_mmio_write(__writew, "w", unsigned short, "r", ) |
build_mmio_write(__writel, "l", unsigned int, "r", ) |
#define readb_relaxed(a) __readb(a) |
#define readw_relaxed(a) __readw(a) |
#define readl_relaxed(a) __readl(a) |
872,6 → 846,14 |
return __copy_to_user(to, from, n); |
} |
#define CAP_SYS_ADMIN 21 |
static inline bool capable(int cap) |
{ |
return true; |
} |
void *kmap(struct page *page); |
void *kmap_atomic(struct page *page); |
void kunmap(struct page *page); |
879,10 → 861,14 |
typedef u64 async_cookie_t; |
#define iowrite32(v, addr) writel((v), (addr)) |
//#define iowrite32(v, addr) writel((v), (addr)) |
#define __init |
#define CONFIG_PAGE_OFFSET 0 |
typedef long long __kernel_long_t; |
typedef unsigned long long __kernel_ulong_t; |
#define __kernel_long_t __kernel_long_t |
#endif |
/drivers/include/linux/list.h |
---|
24,7 → 24,7 |
static inline void INIT_LIST_HEAD(struct list_head *list) |
{ |
list->next = list; |
WRITE_ONCE(list->next, list); |
list->prev = list; |
} |
42,7 → 42,7 |
next->prev = new; |
new->next = next; |
new->prev = prev; |
prev->next = new; |
WRITE_ONCE(prev->next, new); |
} |
#else |
extern void __list_add(struct list_head *new, |
186,7 → 186,7 |
*/ |
static inline int list_empty(const struct list_head *head) |
{ |
return head->next == head; |
return READ_ONCE(head->next) == head; |
} |
/** |
608,7 → 608,7 |
static inline int hlist_empty(const struct hlist_head *h) |
{ |
return !h->first; |
return !READ_ONCE(h->first); |
} |
static inline void __hlist_del(struct hlist_node *n) |
642,7 → 642,7 |
n->next = first; |
if (first) |
first->pprev = &n->next; |
h->first = n; |
WRITE_ONCE(h->first, n); |
n->pprev = &h->first; |
} |
653,7 → 653,7 |
n->pprev = next->pprev; |
n->next = next; |
next->pprev = &n->next; |
*(n->pprev) = n; |
WRITE_ONCE(*(n->pprev), n); |
} |
static inline void hlist_add_behind(struct hlist_node *n, |
660,7 → 660,7 |
struct hlist_node *prev) |
{ |
n->next = prev->next; |
prev->next = n; |
WRITE_ONCE(prev->next, n); |
n->pprev = &prev->next; |
if (n->next) |
/drivers/include/linux/lockdep.h |
---|
66,7 → 66,7 |
/* |
* class-hash: |
*/ |
struct list_head hash_entry; |
struct hlist_node hash_entry; |
/* |
* global list of all lock-classes: |
199,7 → 199,7 |
u8 irq_context; |
u8 depth; |
u16 base; |
struct list_head entry; |
struct hlist_node entry; |
u64 chain_key; |
}; |
/drivers/include/linux/log2.h |
---|
16,6 → 16,12 |
#include <linux/bitops.h> |
/* |
* deal with unrepresentable constant logarithms |
*/ |
extern __attribute__((const, noreturn)) |
int ____ilog2_NaN(void); |
/* |
* non-constant log of base 2 calculators |
* - the arch may override these in asm/bitops.h if they can be implemented |
* more efficiently than using fls() and fls64() |
79,7 → 85,7 |
#define ilog2(n) \ |
( \ |
__builtin_constant_p(n) ? ( \ |
(n) < 2 ? 0 : \ |
(n) < 1 ? ____ilog2_NaN() : \ |
(n) & (1ULL << 63) ? 63 : \ |
(n) & (1ULL << 62) ? 62 : \ |
(n) & (1ULL << 61) ? 61 : \ |
142,7 → 148,10 |
(n) & (1ULL << 4) ? 4 : \ |
(n) & (1ULL << 3) ? 3 : \ |
(n) & (1ULL << 2) ? 2 : \ |
1 ) : \ |
(n) & (1ULL << 1) ? 1 : \ |
(n) & (1ULL << 0) ? 0 : \ |
____ilog2_NaN() \ |
) : \ |
(sizeof(n) <= 4) ? \ |
__ilog2_u32(n) : \ |
__ilog2_u64(n) \ |
194,17 → 203,6 |
* ... and so on. |
*/ |
static inline __attribute_const__ |
int __order_base_2(unsigned long n) |
{ |
return n > 1 ? ilog2(n - 1) + 1 : 0; |
} |
#define order_base_2(n) ilog2(roundup_pow_of_two(n)) |
#define order_base_2(n) \ |
( \ |
__builtin_constant_p(n) ? ( \ |
((n) == 0 || (n) == 1) ? 0 : \ |
ilog2((n) - 1) + 1) : \ |
__order_base_2(n) \ |
) |
#endif /* _LINUX_LOG2_H */ |
/drivers/include/linux/mmdebug.h |
---|
56,4 → 56,10 |
#define VIRTUAL_BUG_ON(cond) do { } while (0) |
#endif |
#ifdef CONFIG_DEBUG_VM_PGFLAGS |
#define VM_BUG_ON_PGFLAGS(cond, page) VM_BUG_ON_PAGE(cond, page) |
#else |
#define VM_BUG_ON_PGFLAGS(cond, page) BUILD_BUG_ON_INVALID(cond) |
#endif |
#endif |
/drivers/include/linux/mmzone.h |
---|
0,0 → 1,54 |
#ifndef _LINUX_MMZONE_H |
#define _LINUX_MMZONE_H |
#include <linux/spinlock.h> |
#include <linux/list.h> |
#include <linux/wait.h> |
#include <linux/bitops.h> |
#include <linux/atomic.h> |
/* Free memory management - zoned buddy allocator. */ |
#ifndef CONFIG_FORCE_MAX_ZONEORDER |
#define MAX_ORDER 11 |
#else |
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER |
#endif |
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) |
/* |
* PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed |
* costly to service. That is between allocation orders which should |
* coalesce naturally under reasonable reclaim pressure and those which |
* will not. |
*/ |
#define PAGE_ALLOC_COSTLY_ORDER 3 |
enum { |
MIGRATE_UNMOVABLE, |
MIGRATE_MOVABLE, |
MIGRATE_RECLAIMABLE, |
MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ |
MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, |
#ifdef CONFIG_CMA |
/* |
* MIGRATE_CMA migration type is designed to mimic the way |
* ZONE_MOVABLE works. Only movable pages can be allocated |
* from MIGRATE_CMA pageblocks and page allocator never |
* implicitly change migration type of MIGRATE_CMA pageblock. |
* |
* The way to use it is to change migratetype of a range of |
* pageblocks to MIGRATE_CMA which can be done by |
* __free_pageblock_cma() function. What is important though |
* is that a range of pageblocks must be aligned to |
* MAX_ORDER_NR_PAGES should biggest page be bigger then |
* a single pageblock. |
*/ |
MIGRATE_CMA, |
#endif |
#ifdef CONFIG_MEMORY_ISOLATION |
MIGRATE_ISOLATE, /* can't allocate from here */ |
#endif |
MIGRATE_TYPES |
}; |
#endif /* _LINUX_MMZONE_H */ |
/drivers/include/linux/mod_devicetable.h |
---|
404,7 → 404,7 |
* For Hyper-V devices we use the device guid as the id. |
*/ |
struct hv_vmbus_device_id { |
__u8 guid[16]; |
uuid_le guid; |
kernel_ulong_t driver_data; /* Data private to the driver */ |
}; |
/drivers/include/linux/notifier.h |
---|
0,0 → 1,69 |
/* |
* Routines to manage notifier chains for passing status changes to any |
* interested routines. We need this instead of hard coded call lists so |
* that modules can poke their nose into the innards. The network devices |
* needed them so here they are for the rest of you. |
* |
* Alan Cox <Alan.Cox@linux.org> |
*/ |
#ifndef _LINUX_NOTIFIER_H |
#define _LINUX_NOTIFIER_H |
#include <linux/errno.h> |
#include <linux/mutex.h> |
#include <linux/rwsem.h> |
/* |
* Notifier chains are of four types: |
* |
* Atomic notifier chains: Chain callbacks run in interrupt/atomic |
* context. Callouts are not allowed to block. |
* Blocking notifier chains: Chain callbacks run in process context. |
* Callouts are allowed to block. |
* Raw notifier chains: There are no restrictions on callbacks, |
* registration, or unregistration. All locking and protection |
* must be provided by the caller. |
* SRCU notifier chains: A variant of blocking notifier chains, with |
* the same restrictions. |
* |
* atomic_notifier_chain_register() may be called from an atomic context, |
* but blocking_notifier_chain_register() and srcu_notifier_chain_register() |
* must be called from a process context. Ditto for the corresponding |
* _unregister() routines. |
* |
* atomic_notifier_chain_unregister(), blocking_notifier_chain_unregister(), |
* and srcu_notifier_chain_unregister() _must not_ be called from within |
* the call chain. |
* |
* SRCU notifier chains are an alternative form of blocking notifier chains. |
* They use SRCU (Sleepable Read-Copy Update) instead of rw-semaphores for |
* protection of the chain links. This means there is _very_ low overhead |
* in srcu_notifier_call_chain(): no cache bounces and no memory barriers. |
* As compensation, srcu_notifier_chain_unregister() is rather expensive. |
* SRCU notifier chains should be used when the chain will be called very |
* often but notifier_blocks will seldom be removed. Also, SRCU notifier |
* chains are slightly more difficult to use because they require special |
* runtime initialization. |
*/ |
struct notifier_block; |
typedef int (*notifier_fn_t)(struct notifier_block *nb, |
unsigned long action, void *data); |
struct notifier_block { |
notifier_fn_t notifier_call; |
struct notifier_block __rcu *next; |
int priority; |
}; |
/* Console keyboard events. |
* Note: KBD_KEYCODE is always sent before KBD_UNBOUND_KEYCODE, KBD_UNICODE and |
* KBD_KEYSYM. */ |
#define KBD_KEYCODE 0x0001 /* Keyboard keycode, called before any other */ |
#define KBD_UNBOUND_KEYCODE 0x0002 /* Keyboard keycode which is not bound to any other */ |
#define KBD_UNICODE 0x0003 /* Keyboard unicode */ |
#define KBD_KEYSYM 0x0004 /* Keyboard keysym */ |
#define KBD_POST_KEYSYM 0x0005 /* Called after keyboard keysym interpretation */ |
#endif /* _LINUX_NOTIFIER_H */ |
/drivers/include/linux/pci.h |
---|
990,23 → 990,6 |
return pdev->is_managed; |
} |
static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq) |
{ |
pdev->irq = irq; |
pdev->irq_managed = 1; |
} |
static inline void pci_reset_managed_irq(struct pci_dev *pdev) |
{ |
pdev->irq = 0; |
pdev->irq_managed = 0; |
} |
static inline bool pci_has_managed_irq(struct pci_dev *pdev) |
{ |
return pdev->irq_managed && pdev->irq > 0; |
} |
void pci_disable_device(struct pci_dev *dev); |
extern unsigned int pcibios_max_latency; |
1267,8 → 1250,6 |
u16 entry; /* driver uses to specify entry, OS writes */ |
}; |
void pci_msi_setup_pci_dev(struct pci_dev *dev); |
#ifdef CONFIG_PCI_MSI |
int pci_msi_vec_count(struct pci_dev *dev); |
void pci_msi_shutdown(struct pci_dev *dev); |
1956,6 → 1937,16 |
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } |
#endif /* CONFIG_OF */ |
#ifdef CONFIG_ACPI |
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus); |
void |
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *)); |
#else |
static inline struct irq_domain * |
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } |
#endif |
#ifdef CONFIG_EEH |
static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) |
{ |
2003,4 → 1994,6 |
const struct pci_device_id* |
find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist); |
struct pci_dev * _pci_get_bus_and_slot(unsigned int bus, unsigned int devfn); |
#endif /* LINUX_PCI_H */ |
/drivers/include/linux/pm.h |
---|
573,6 → 573,7 |
struct wakeup_source *wakeup; |
bool wakeup_path:1; |
bool syscore:1; |
bool no_pm_callbacks:1; /* Owned by the PM core */ |
#else |
unsigned int should_wakeup:1; |
#endif |
/drivers/include/linux/pm_runtime.h |
---|
10,6 → 10,7 |
#define _LINUX_PM_RUNTIME_H |
#include <linux/device.h> |
#include <linux/notifier.h> |
#include <linux/pm.h> |
#include <linux/jiffies.h> |
38,6 → 39,7 |
extern int __pm_runtime_idle(struct device *dev, int rpmflags); |
extern int __pm_runtime_suspend(struct device *dev, int rpmflags); |
extern int __pm_runtime_resume(struct device *dev, int rpmflags); |
extern int pm_runtime_get_if_in_use(struct device *dev); |
extern int pm_schedule_suspend(struct device *dev, unsigned int delay); |
extern int __pm_runtime_set_status(struct device *dev, unsigned int status); |
extern int pm_runtime_barrier(struct device *dev); |
142,6 → 144,10 |
{ |
return -ENOSYS; |
} |
static inline int pm_runtime_get_if_in_use(struct device *dev) |
{ |
return -EINVAL; |
} |
static inline int __pm_runtime_set_status(struct device *dev, |
unsigned int status) { return 0; } |
static inline int pm_runtime_barrier(struct device *dev) { return 0; } |
/drivers/include/linux/poison.h |
---|
27,11 → 27,15 |
* Magic number "tsta" to indicate a static timer initializer |
* for the object debugging code. |
*/ |
#define TIMER_ENTRY_STATIC ((void *) 0x74737461) |
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) |
/********** mm/debug-pagealloc.c **********/ |
#define PAGE_POISON 0xaa |
/********** mm/page_alloc.c ************/ |
#define TAIL_MAPPING ((void *) 0x400 + POISON_POINTER_DELTA) |
/********** mm/slab.c **********/ |
/* |
* Magic nums for obj red zoning. |
/drivers/include/linux/printk.h |
---|
68,14 → 68,16 |
/* |
* Dummy printk for disabled debugging statements to use whilst maintaining |
* gcc's format and side-effect checking. |
* gcc's format checking. |
*/ |
static inline __printf(1, 2) |
int no_printk(const char *fmt, ...) |
{ |
return 0; |
} |
#define no_printk(fmt, ...) \ |
do { \ |
if (0) \ |
printk(fmt, ##__VA_ARGS__); \ |
} while (0) |
__printf(1, 2) int dbgprintf(const char *fmt, ...); |
#define printk(fmt, arg...) dbgprintf(fmt , ##arg) |
/drivers/include/linux/property.h |
---|
144,15 → 144,19 |
/** |
* struct property_entry - "Built-in" device property representation. |
* @name: Name of the property. |
* @type: Type of the property. |
* @nval: Number of items of type @type making up the value. |
* @value: Value of the property (an array of @nval items of type @type). |
* @length: Length of data making up the value. |
* @is_array: True when the property is an array. |
* @is_string: True when property is a string. |
* @pointer: Pointer to the property (an array of items of the given type). |
* @value: Value of the property (when it is a single item of the given type). |
*/ |
struct property_entry { |
const char *name; |
enum dev_prop_type type; |
size_t nval; |
size_t length; |
bool is_array; |
bool is_string; |
union { |
union { |
void *raw_data; |
u8 *u8_data; |
u16 *u16_data; |
159,9 → 163,81 |
u32 *u32_data; |
u64 *u64_data; |
const char **str; |
} pointer; |
union { |
unsigned long long raw_data; |
u8 u8_data; |
u16 u16_data; |
u32 u32_data; |
u64 u64_data; |
const char *str; |
} value; |
}; |
}; |
/* |
* Note: the below four initializers for the anonymous union are carefully |
* crafted to avoid gcc-4.4.4's problems with initialization of anon unions |
* and structs. |
*/ |
#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \ |
{ \ |
.name = _name_, \ |
.length = ARRAY_SIZE(_val_) * sizeof(_type_), \ |
.is_array = true, \ |
.is_string = false, \ |
{ .pointer = { _type_##_data = _val_ } }, \ |
} |
#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \ |
PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, _val_) |
#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \ |
PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, _val_) |
#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \ |
PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, _val_) |
#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \ |
PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_) |
#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ |
{ \ |
.name = _name_, \ |
.length = ARRAY_SIZE(_val_) * sizeof(const char *), \ |
.is_array = true, \ |
.is_string = true, \ |
{ .pointer = { .str = _val_ } }, \ |
} |
#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \ |
{ \ |
.name = _name_, \ |
.length = sizeof(_type_), \ |
.is_string = false, \ |
{ .value = { ._type_##_data = _val_ } }, \ |
} |
#define PROPERTY_ENTRY_U8(_name_, _val_) \ |
PROPERTY_ENTRY_INTEGER(_name_, u8, _val_) |
#define PROPERTY_ENTRY_U16(_name_, _val_) \ |
PROPERTY_ENTRY_INTEGER(_name_, u16, _val_) |
#define PROPERTY_ENTRY_U32(_name_, _val_) \ |
PROPERTY_ENTRY_INTEGER(_name_, u32, _val_) |
#define PROPERTY_ENTRY_U64(_name_, _val_) \ |
PROPERTY_ENTRY_INTEGER(_name_, u64, _val_) |
#define PROPERTY_ENTRY_STRING(_name_, _val_) \ |
{ \ |
.name = _name_, \ |
.length = sizeof(_val_), \ |
.is_string = true, \ |
{ .value = { .str = _val_ } }, \ |
} |
#define PROPERTY_ENTRY_BOOL(_name_) \ |
{ \ |
.name = _name_, \ |
} |
/** |
* struct property_set - Collection of "built-in" device properties. |
* @fwnode: Handle to be pointed to by the fwnode field of struct device. |
172,7 → 248,8 |
struct property_entry *properties; |
}; |
void device_add_property_set(struct device *dev, struct property_set *pset); |
int device_add_property_set(struct device *dev, const struct property_set *pset); |
void device_remove_property_set(struct device *dev); |
bool device_dma_supported(struct device *dev); |
/drivers/include/linux/pwm.h |
---|
179,6 → 179,8 |
void pwm_put(struct pwm_device *pwm); |
struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id); |
struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np, |
const char *con_id); |
void devm_pwm_put(struct device *dev, struct pwm_device *pwm); |
bool pwm_can_sleep(struct pwm_device *pwm); |
192,11 → 194,36 |
{ |
return NULL; |
} |
static inline int pwmchip_add(struct pwm_chip *chip) |
{ |
return -EINVAL; |
} |
static inline int pwmchip_add_inversed(struct pwm_chip *chip) |
{ |
return -EINVAL; |
} |
static inline int pwmchip_remove(struct pwm_chip *chip) |
{ |
return -EINVAL; |
} |
static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, |
unsigned int index, |
const char *label) |
{ |
return ERR_PTR(-ENODEV); |
} |
static inline struct pwm_device *pwm_get(struct device *dev, |
const char *consumer) |
{ |
return ERR_PTR(-ENODEV); |
} |
static inline void pwm_put(struct pwm_device *pwm) |
{ |
} |
206,6 → 233,8 |
{ |
return ERR_PTR(-ENODEV); |
} |
static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm) |
{ |
} |
/drivers/include/linux/rbtree.h |
---|
50,7 → 50,7 |
#define RB_ROOT (struct rb_root) { NULL, } |
#define rb_entry(ptr, type, member) container_of(ptr, type, member) |
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) |
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) |
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */ |
#define RB_EMPTY_NODE(node) \ |
/drivers/include/linux/rculist.h |
---|
179,33 → 179,32 |
} |
/** |
* list_splice_init_rcu - splice an RCU-protected list into an existing list. |
* __list_splice_init_rcu - join an RCU-protected list into an existing list. |
* @list: the RCU-protected list to splice |
* @head: the place in the list to splice the first list into |
* @prev: points to the last element of the existing list |
* @next: points to the first element of the existing list |
* @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... |
* |
* @head can be RCU-read traversed concurrently with this function. |
* The list pointed to by @prev and @next can be RCU-read traversed |
* concurrently with this function. |
* |
* Note that this function blocks. |
* |
* Important note: the caller must take whatever action is necessary to |
* prevent any other updates to @head. In principle, it is possible |
* to modify the list as soon as sync() begins execution. |
* If this sort of thing becomes necessary, an alternative version |
* based on call_rcu() could be created. But only if -really- |
* needed -- there is no shortage of RCU API members. |
* Important note: the caller must take whatever action is necessary to prevent |
* any other updates to the existing list. In principle, it is possible to |
* modify the list as soon as sync() begins execution. If this sort of thing |
* becomes necessary, an alternative version based on call_rcu() could be |
* created. But only if -really- needed -- there is no shortage of RCU API |
* members. |
*/ |
static inline void list_splice_init_rcu(struct list_head *list, |
struct list_head *head, |
static inline void __list_splice_init_rcu(struct list_head *list, |
struct list_head *prev, |
struct list_head *next, |
void (*sync)(void)) |
{ |
struct list_head *first = list->next; |
struct list_head *last = list->prev; |
struct list_head *at = head->next; |
if (list_empty(list)) |
return; |
/* |
* "first" and "last" tracking list, so initialize it. RCU readers |
* have access to this list, so we must use INIT_LIST_HEAD_RCU() |
231,13 → 230,43 |
* this function. |
*/ |
last->next = at; |
rcu_assign_pointer(list_next_rcu(head), first); |
first->prev = head; |
at->prev = last; |
last->next = next; |
rcu_assign_pointer(list_next_rcu(prev), first); |
first->prev = prev; |
next->prev = last; |
} |
/** |
* list_splice_init_rcu - splice an RCU-protected list into an existing list, |
* designed for stacks. |
* @list: the RCU-protected list to splice |
* @head: the place in the existing list to splice the first list into |
* @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... |
*/ |
static inline void list_splice_init_rcu(struct list_head *list, |
struct list_head *head, |
void (*sync)(void)) |
{ |
if (!list_empty(list)) |
__list_splice_init_rcu(list, head, head->next, sync); |
} |
/** |
* list_splice_tail_init_rcu - splice an RCU-protected list into an existing |
* list, designed for queues. |
* @list: the RCU-protected list to splice |
* @head: the place in the existing list to splice the first list into |
* @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... |
*/ |
static inline void list_splice_tail_init_rcu(struct list_head *list, |
struct list_head *head, |
void (*sync)(void)) |
{ |
if (!list_empty(list)) |
__list_splice_init_rcu(list, head->prev, head, sync); |
} |
/** |
* list_entry_rcu - get the struct for this entry |
* @ptr: the &struct list_head pointer. |
* @type: the type of the struct this is embedded in. |
305,6 → 334,42 |
pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
/** |
* list_entry_lockless - get the struct for this entry |
* @ptr: the &struct list_head pointer. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_head within the struct. |
* |
* This primitive may safely run concurrently with the _rcu list-mutation |
* primitives such as list_add_rcu(), but requires some implicit RCU |
* read-side guarding. One example is running within a special |
* exception-time environment where preemption is disabled and where |
* lockdep cannot be invoked (in which case updaters must use RCU-sched, |
* as in synchronize_sched(), call_rcu_sched(), and friends). Another |
* example is when items are added to the list, but never deleted. |
*/ |
/* Uses lockless_dereference(): dependency ordering only, no lockdep checks. */
#define list_entry_lockless(ptr, type, member) \
	container_of((typeof(ptr))lockless_dereference(ptr), type, member)
/** |
* list_for_each_entry_lockless - iterate over rcu list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
* @member: the name of the list_struct within the struct. |
* |
* This primitive may safely run concurrently with the _rcu list-mutation |
* primitives such as list_add_rcu(), but requires some implicit RCU |
* read-side guarding. One example is running within a special |
* exception-time environment where preemption is disabled and where |
* lockdep cannot be invoked (in which case updaters must use RCU-sched, |
* as in synchronize_sched(), call_rcu_sched(), and friends). Another |
* example is when items are added to the list, but never deleted. |
*/ |
/* Terminates when the cursor's list_head wraps back around to @head. */
#define list_for_each_entry_lockless(pos, head, member) \
	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))
/** |
* list_for_each_entry_continue_rcu - continue iteration over list of given type |
* @pos: the type * to use as a loop cursor. |
* @head: the head for your list. |
/drivers/include/linux/rcupdate.h |
---|
48,10 → 48,17 |
#include <asm/barrier.h> |
#ifndef CONFIG_TINY_RCU |
extern int rcu_expedited; /* for sysctl */ |
extern int rcu_normal; /* also for sysctl */ |
#endif /* #ifndef CONFIG_TINY_RCU */ |
#ifdef CONFIG_TINY_RCU |
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */ |
/* Tiny RCU: every grace period is a "normal" one, so always report true. */
static inline bool rcu_gp_is_normal(void)  /* Internal RCU use. */
{
	return true;
}
static inline bool rcu_gp_is_expedited(void) /* Internal RCU use. */ |
{ |
return false; |
65,6 → 72,7 |
{ |
} |
#else /* #ifdef CONFIG_TINY_RCU */ |
bool rcu_gp_is_normal(void); /* Internal RCU use. */ |
bool rcu_gp_is_expedited(void); /* Internal RCU use. */ |
void rcu_expedite_gp(void); |
void rcu_unexpedite_gp(void); |
283,7 → 291,6 |
/* Internal to kernel */ |
void rcu_init(void); |
void rcu_end_inkernel_boot(void); |
void rcu_sched_qs(void); |
void rcu_bh_qs(void); |
void rcu_check_callbacks(int user); |
291,6 → 298,12 |
int rcu_cpu_notify(struct notifier_block *self, |
unsigned long action, void *hcpu); |
#ifndef CONFIG_TINY_RCU |
void rcu_end_inkernel_boot(void); |
#else /* #ifndef CONFIG_TINY_RCU */ |
static inline void rcu_end_inkernel_boot(void) { } |
#endif /* #ifndef CONFIG_TINY_RCU */ |
#ifdef CONFIG_RCU_STALL_COMMON |
void rcu_sysrq_start(void); |
void rcu_sysrq_end(void); |
341,9 → 354,9 |
*/ |
/*
 * RCU_NONIDLE() - run statement @a with RCU watching, even from idle.
 * Enters and exits the RCU-watched state exactly once around @a, using
 * the _irqson variants, which save/restore irq state themselves.
 * (Fixes diff-merge residue that left both the plain and _irqson
 * enter/exit calls in place, entering/exiting twice per invocation.)
 */
#define RCU_NONIDLE(a) \
	do { \
		rcu_irq_enter_irqson(); \
		do { a; } while (0); \
		rcu_irq_exit_irqson(); \
	} while (0)
/* |
703,7 → 716,7 |
* The tracing infrastructure traces RCU (we want that), but unfortunately |
* some of the RCU checks causes tracing to lock up the system. |
* |
* The tracing version of rcu_dereference_raw() must not call |
* The no-tracing version of rcu_dereference_raw() must not call |
* rcu_read_lock_held(). |
*/ |
#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu) |
754,6 → 767,28 |
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) |
/** |
* rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism |
* @p: The pointer to hand off |
* |
* This is simply an identity function, but it documents where a pointer |
* is handed off from RCU to some other synchronization mechanism, for |
* example, reference counting or locking. In C11, it would map to |
* kill_dependency(). It could be used as follows: |
* |
* rcu_read_lock(); |
* p = rcu_dereference(gp); |
* long_lived = is_long_lived(p); |
* if (long_lived) { |
* if (!atomic_inc_not_zero(p->refcnt)) |
* long_lived = false; |
* else |
* p = rcu_pointer_handoff(p); |
* } |
* rcu_read_unlock(); |
*/ |
#define rcu_pointer_handoff(p) (p) |
/** |
* rcu_read_lock() - mark the beginning of an RCU read-side critical section |
* |
* When synchronize_rcu() is invoked on one CPU while other CPUs |
985,7 → 1020,7 |
/*
 * RCU_INIT_POINTER() - initialize an RCU-protected pointer without
 * ordering guarantees.  Uses WRITE_ONCE() so the compiler cannot tear
 * or fuse the store.  (Fixes diff-merge residue that left both the
 * plain assignment and the WRITE_ONCE() store in the macro body,
 * assigning @p twice.)
 */
#define RCU_INIT_POINTER(p, v) \
	do { \
		rcu_dereference_sparse(p, __rcu); \
		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
	} while (0)
/** |
/drivers/include/linux/rcutiny.h |
---|
175,6 → 175,14 |
{ |
} |
/* rcutiny: these RCU irq entry/exit hooks are deliberately empty no-ops. */
static inline void rcu_irq_exit_irqson(void)
{
}
static inline void rcu_irq_enter_irqson(void)
{
}
static inline void rcu_irq_exit(void)
{
}
/drivers/include/linux/seq_file.h |
---|
5,6 → 5,10 |
#include <linux/string.h> |
#include <linux/bug.h> |
#include <linux/mutex.h> |
struct file; |
struct path; |
struct inode; |
struct dentry; |
struct seq_file { |
char *buf; |
/drivers/include/linux/seqlock.h |
---|
234,7 → 234,53 |
s->sequence++; |
} |
/* |
/** |
* raw_write_seqcount_barrier - do a seq write barrier |
* @s: pointer to seqcount_t |
* |
* This can be used to provide an ordering guarantee instead of the |
* usual consistency guarantee. It is one wmb cheaper, because we can |
* collapse the two back-to-back wmb()s. |
* |
* seqcount_t seq; |
* bool X = true, Y = false; |
* |
* void read(void) |
* { |
* bool x, y; |
* |
* do { |
* int s = read_seqcount_begin(&seq); |
* |
* x = X; y = Y; |
* |
* } while (read_seqcount_retry(&seq, s)); |
* |
* BUG_ON(!x && !y); |
* } |
* |
* void write(void) |
* { |
* Y = true; |
* |
* raw_write_seqcount_barrier(seq); |
* |
* X = false; |
* } |
*/ |
static inline void raw_write_seqcount_barrier(seqcount_t *s)
{
	s->sequence++;	/* first bump: in-flight readers will retry */
	smp_wmb();	/* one barrier serves both increments (see comment above) */
	s->sequence++;	/* second bump: count is consistent for new readers */
}
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
	/* lockless_dereference() supplies the needed ordering; no smp_rmb() here. */
	return lockless_dereference(s->sequence);
}
/** |
* raw_write_seqcount_latch - redirect readers to even/odd copy |
* @s: pointer to seqcount_t |
* |
/drivers/include/linux/slab.h |
---|
86,6 → 86,11 |
#else |
# define SLAB_FAILSLAB 0x00000000UL |
#endif |
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
# define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */
#else
# define SLAB_ACCOUNT 0x00000000UL /* no-op flag when memcg accounting is unavailable */
#endif
/* The following flags affect the page allocator grouping pages by mobility */ |
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ |
113,14 → 118,14 |
int kmem_cache_shrink(struct kmem_cache *); |
void kmem_cache_free(struct kmem_cache *, void *); |
/*
 * krealloc - resize a kmalloc()ed buffer.
 * @p:        buffer to resize (may be NULL, per realloc semantics).
 * @new_size: requested size in bytes.
 * @flags:    allocation flags (ignored in this port; kept for API parity).
 *
 * const-qualified to match the upstream Linux prototype; const is cast
 * away for the underlying realloc.  (Fixes diff-merge residue that left
 * both the old and new signature/body lines in place — invalid C.)
 */
static inline void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	return __builtin_realloc((void *)p, new_size);
}
/*
 * kfree - free memory obtained from kmalloc()/krealloc().
 * @p: buffer to free; kfree(NULL) is a no-op (free() semantics).
 *
 * const-qualified to match the upstream Linux prototype; const is cast
 * away for the underlying free.  (Fixes diff-merge residue that left
 * both the old and new signature/body lines in place — invalid C.)
 */
static inline void kfree(const void *p)
{
	__builtin_free((void *)p);
}
static __always_inline void *kmalloc(size_t size, gfp_t flags) |
{ |
/drivers/include/linux/string.h |
---|
10,6 → 10,7 |
extern char *strndup_user(const char __user *, long); |
extern void *memdup_user(const void __user *, size_t); |
extern void *memdup_user_nul(const void __user *, size_t); |
/* |
* Include machine specific inline routines |
127,11 → 128,7 |
extern void argv_free(char **argv); |
extern bool sysfs_streq(const char *s1, const char *s2); |
extern int kstrtobool(const char *s, bool *res); |
/*
 * strtobool - convert common user-supplied boolean strings ("y"/"n", "1"/"0").
 * Thin wrapper around kstrtobool() (declared above), kept as a static
 * inline for source compatibility with existing callers.  (Fixes
 * diff-merge residue that left both this definition and a conflicting
 * `extern int strtobool(...)` declaration in place.)
 */
static inline int strtobool(const char *s, bool *res)
{
	return kstrtobool(s, res);
}
#ifdef CONFIG_BINARY_PRINTF |
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); |
/drivers/include/linux/sysfs.h |
---|
31,6 → 31,15 |
struct lock_class_key skey; |
#endif |
}; |
/* A set of attributes exported together as one sysfs group. */
struct attribute_group {
	const char *name;	/* NOTE(review): appears optional (group subdirectory name) — confirm against sysfs core */
	/* Callbacks deciding per-attribute visibility/mode; semantics follow the sysfs core — confirm there. */
	umode_t (*is_visible)(struct kobject *,
			      struct attribute *, int);
	umode_t (*is_bin_visible)(struct kobject *,
				  struct bin_attribute *, int);
	struct attribute **attrs;	/* NOTE(review): presumably NULL-terminated, per sysfs convention */
	struct bin_attribute **bin_attrs;
};
#ifdef CONFIG_SYSFS |
int __must_check sysfs_create_dir_ns(struct kobject *kobj, const void *ns); |
216,7 → 225,34 |
{ |
} |
/*
 * No-op fallbacks for attribute-group operations.
 * NOTE(review): these follow the `#ifdef CONFIG_SYSFS` branch above, so
 * presumably the sysfs-disabled side — confirm.  All succeed trivially.
 */
static inline int sysfs_create_group(struct kobject *kobj,
				     const struct attribute_group *grp)
{
	return 0;
}
static inline int sysfs_create_groups(struct kobject *kobj,
				      const struct attribute_group **groups)
{
	return 0;
}
static inline int sysfs_update_group(struct kobject *kobj,
				     const struct attribute_group *grp)
{
	return 0;
}
static inline void sysfs_remove_group(struct kobject *kobj,
				      const struct attribute_group *grp)
{
}
static inline void sysfs_remove_groups(struct kobject *kobj,
				       const struct attribute_group **groups)
{
}
static inline int sysfs_add_file_to_group(struct kobject *kobj, |
const struct attribute *attr, const char *group) |
{ |
228,6 → 264,17 |
{ |
} |
/* No-op fallbacks for group merge/unmerge; merge trivially succeeds. */
static inline int sysfs_merge_group(struct kobject *kobj,
				    const struct attribute_group *grp)
{
	return 0;
}
static inline void sysfs_unmerge_group(struct kobject *kobj,
				       const struct attribute_group *grp)
{
}
static inline int sysfs_add_link_to_group(struct kobject *kobj, |
const char *group_name, struct kobject *target, |
const char *link_name) |
/drivers/include/linux/sysrq.h |
---|
1,2 → 1,14 |
/* -*- linux-c -*- |
* |
* $Id: sysrq.h,v 1.3 1997/07/17 11:54:33 mj Exp $ |
* |
* Linux Magic System Request Key Hacks |
* |
* (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> |
* |
* (c) 2000 Crutcher Dunnavant <crutcher+kernel@datastacks.com> |
* overhauled to use key registration |
* based upon discussions in irc://irc.openprojects.net/#kernelnewbies
*/ |
// stub |
/drivers/include/linux/timer.h |
---|
2,7 → 2,12 |
#define _LINUX_TIMER_H |
#include <linux/list.h> |
#include <linux/ktime.h> |
#include <linux/stddef.h> |
#include <linux/stringify.h> |
struct tvec_base; |
unsigned long __round_jiffies(unsigned long j, int cpu); |
unsigned long __round_jiffies_relative(unsigned long j, int cpu); |
unsigned long round_jiffies(unsigned long j); |
/drivers/include/linux/vmalloc.h |
---|
13,7 → 13,6 |
#define VM_ALLOC 0x00000002 /* vmalloc() */ |
#define VM_MAP 0x00000004 /* vmap()ed pages */ |
#define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ |
#define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ |
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */ |
#define VM_NO_GUARD 0x00000040 /* don't add guard page */ |
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */ |