Subversion Repositories: Kolibri OS

Compare Revisions

Rev 7142 → Rev 7143

/drivers/include/linux/atomic.h
34,7 → 34,12
* The idea here is to build acquire/release variants by adding explicit
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*
* Besides, if an arch has a special barrier for acquire/release, it could
* implement its own __atomic_op_* and use the same framework for building
* variants
*/
#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
41,13 → 46,17
smp_mb__after_atomic(); \
__ret; \
})
#endif
 
#ifndef __atomic_op_release
#define __atomic_op_release(op, args...) \
({ \
smp_mb__before_atomic(); \
op##_relaxed(args); \
})
#endif
 
#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
56,6 → 65,7
smp_mb__after_atomic(); \
__ret; \
})
#endif
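
A sketch (not part of this revision) of how the framework above is consumed: an architecture that only implements the _relaxed form of an operation gets the ordered variants generated from it, following the same pattern the per-operation blocks later in this header use.

#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...) \
        __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...) \
        __atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif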
 
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
548,6 → 558,27
}
#endif
 
/**
* atomic_fetch_or - perform *p |= mask and return old value of *p
* @p: pointer to atomic_t
* @mask: mask to OR on the atomic_t
*/
#ifndef atomic_fetch_or
static inline int atomic_fetch_or(atomic_t *p, int mask)
{
int old, val = atomic_read(p);
 
for (;;) {
old = atomic_cmpxchg(p, val, val | mask);
if (old == val)
break;
val = old;
}
 
return old;
}
#endif
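
Usage sketch for atomic_fetch_or() (the flag name is illustrative, not from this revision): because the old value is returned, the caller can tell whether it was the one that actually set the bit.

#define MY_FLAG_PENDING 0x1 /* hypothetical flag bit */

static inline bool mark_pending(atomic_t *state)
{
        /* true if the pending bit was clear before this call */
        return !(atomic_fetch_or(state, MY_FLAG_PENDING) & MY_FLAG_PENDING);
}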
 
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
/drivers/include/linux/bitmap.h
59,6 → 59,8
* bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
* bitmap_release_region(bitmap, pos, order) Free specified bit region
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
* bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words)
*/
 
/*
163,6 → 165,14
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
extern unsigned int bitmap_from_u32array(unsigned long *bitmap,
unsigned int nbits,
const u32 *buf,
unsigned int nwords);
extern unsigned int bitmap_to_u32array(u32 *buf,
unsigned int nwords,
const unsigned long *bitmap,
unsigned int nbits);
#ifdef __BIG_ENDIAN
extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
#else
/drivers/include/linux/bug.h
20,6 → 20,7
#define BUILD_BUG_ON_MSG(cond, msg) (0)
#define BUILD_BUG_ON(condition) (0)
#define BUILD_BUG() (0)
#define MAYBE_BUILD_BUG_ON(cond) (0)
#else /* __CHECKER__ */
 
/* Force a compilation error if a constant expression is not a power of 2 */
83,6 → 84,14
*/
#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
 
#define MAYBE_BUILD_BUG_ON(cond) \
do { \
if (__builtin_constant_p((cond))) \
BUILD_BUG_ON(cond); \
else \
BUG_ON(cond); \
} while (0)
 
#endif /* __CHECKER__ */
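
A short sketch of the new MAYBE_BUILD_BUG_ON() in use (struct and variable names are hypothetical): a constant condition is rejected at compile time and costs nothing at run time, while a non-constant one degrades to a run-time BUG_ON().

struct foo { u32 a, b; }; /* hypothetical structure */

static void check_sizes(size_t len, size_t buffer_size)
{
        /* constant condition: caught by the compiler, no run-time cost */
        MAYBE_BUILD_BUG_ON(sizeof(struct foo) > 64);

        /* non-constant condition: becomes BUG_ON(len > buffer_size) */
        MAYBE_BUILD_BUG_ON(len > buffer_size);
}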
 
#ifdef CONFIG_GENERIC_BUG
/drivers/include/linux/cache.h
12,10 → 12,24
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
 
/*
* __read_mostly is used to keep rarely changing variables out of frequently
* updated cachelines. If an architecture doesn't support it, ignore the
* hint.
*/
#ifndef __read_mostly
#define __read_mostly
#endif
 
/*
* __ro_after_init is used to mark things that are read-only after init (i.e.
* after mark_rodata_ro() has been called). These are effectively read-only,
* but may get written to during init, so can't live in .rodata (via "const").
*/
#ifndef __ro_after_init
#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
#endif
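
Illustrative use of the two annotations above (variable names are made up): mark data that is read often but written rarely, or written only during init.

static int poll_interval __read_mostly = 10;     /* rarely changes after boot */
static unsigned long boot_flags __ro_after_init; /* written only during init */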
 
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif
/drivers/include/linux/clocksource.h
118,6 → 118,23
/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
 
static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
{
/* freq = cyc/from
* mult/2^shift = ns/cyc
* mult = ns/cyc * 2^shift
* mult = from/freq * 2^shift
* mult = from * 2^shift / freq
* mult = (from<<shift) / freq
*/
u64 tmp = ((u64)from) << shift_constant;
 
tmp += freq/2; /* round for do_div */
do_div(tmp, freq);
 
return (u32)tmp;
}
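
A worked example of the conversion above (illustrative numbers, matching the khz2mult case below where from = NSEC_PER_MSEC = 1000000): for a 1000 kHz clocksource and shift = 10,

u32 mult = clocksource_freq2mult(1000, 10, NSEC_PER_MSEC);
/* mult = ((1000000 << 10) + 500) / 1000 = 1024000 */

u64 ns = ((u64)5000 * mult) >> 10;
/* 5000 cycles -> 5120000000 >> 10 = 5000000 ns = 5 ms, i.e. 5000 cycles at 1 MHz */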
 
/**
* clocksource_khz2mult - calculates mult from khz and shift
* @khz: Clocksource frequency in KHz
128,19 → 145,7
*/
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
/* khz = cyc/(Million ns)
* mult/2^shift = ns/cyc
* mult = ns/cyc * 2^shift
* mult = 1Million/khz * 2^shift
* mult = 1000000 * 2^shift / khz
* mult = (1000000<<shift) / khz
*/
u64 tmp = ((u64)1000000) << shift_constant;
 
tmp += khz/2; /* round for do_div */
do_div(tmp, khz);
 
return (u32)tmp;
return clocksource_freq2mult(khz, shift_constant, NSEC_PER_MSEC);
}
 
/**
154,19 → 159,7
*/
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
/* hz = cyc/(Billion ns)
* mult/2^shift = ns/cyc
* mult = ns/cyc * 2^shift
* mult = 1Billion/hz * 2^shift
* mult = 1000000000 * 2^shift / hz
* mult = (1000000000<<shift) / hz
*/
u64 tmp = ((u64)1000000000) << shift_constant;
 
tmp += hz/2; /* round for do_div */
do_div(tmp, hz);
 
return (u32)tmp;
return clocksource_freq2mult(hz, shift_constant, NSEC_PER_SEC);
}
 
/**
/drivers/include/linux/compiler-gcc.h
246,7 → 246,7
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#endif
#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
#if GCC_VERSION >= 40800
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
/drivers/include/linux/compiler.h
20,12 → 20,14
# define __pmem __attribute__((noderef, address_space(5)))
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
#else
#else /* CONFIG_SPARSE_RCU_POINTER */
# define __rcu
#endif
#endif /* CONFIG_SPARSE_RCU_POINTER */
# define __private __attribute__((noderef))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
#else /* __CHECKER__ */
# define __user
# define __kernel
# define __safe
44,7 → 46,9
# define __percpu
# define __rcu
# define __pmem
#endif
# define __private
# define ACCESS_PRIVATE(p, member) ((p)->member)
#endif /* __CHECKER__ */
 
/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
#define ___PASTE(a,b) a##b
263,8 → 267,9
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
* READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
* compile-time warning.
* READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
* least two memcpy()s: one for the __builtin_memcpy() and then one for
* the macro doing the copy of variable - '__u' allocated on the stack.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
/drivers/include/linux/cpumask.h
607,8 → 607,6
 
/**
* cpumask_size - size to allocate for a 'struct cpumask' in bytes
*
* This will eventually be a runtime variable, depending on nr_cpu_ids.
*/
static inline size_t cpumask_size(void)
{
/drivers/include/linux/device.h
122,6 → 122,9
dev->driver_data = data;
}
 
static inline __printf(2, 3)
void dev_notice(const struct device *dev, const char *fmt, ...)
{}
 
 
#endif /* _DEVICE_H_ */
/drivers/include/linux/dma-attrs.h
18,6 → 18,7
DMA_ATTR_NO_KERNEL_MAPPING,
DMA_ATTR_SKIP_CPU_SYNC,
DMA_ATTR_FORCE_CONTIGUOUS,
DMA_ATTR_ALLOC_SINGLE_PAGES,
DMA_ATTR_MAX,
};
 
/drivers/include/linux/dma-buf.h
54,7 → 54,7
* @release: release this buffer; to be called after the last dma_buf_put.
* @begin_cpu_access: [optional] called before cpu access to invalidate cpu
* caches and allocate backing storage (if not yet done)
* respectively pin the objet into memory.
* respectively pin the object into memory.
* @end_cpu_access: [optional] called after cpu access to flush caches.
* @kmap_atomic: maps a page from the buffer into kernel address
* space, users may not block until the subsequent unmap call.
93,10 → 93,8
/* after final dma_buf_put() */
void (*release)(struct dma_buf *);
 
int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
enum dma_data_direction);
void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
enum dma_data_direction);
int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
void *(*kmap_atomic)(struct dma_buf *, unsigned long);
void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*kmap)(struct dma_buf *, unsigned long);
192,10 → 190,6
* kernel side. For example, an exporter that needs to keep a dmabuf ptr
* so that subsequent exports don't create a new dmabuf.
*/
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
get_file(dmabuf->file);
}
 
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
212,9 → 206,9
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
/drivers/include/linux/fb.h
296,9 → 296,6
/* Draws cursor */
int (*fb_cursor) (struct fb_info *info, struct fb_cursor *cursor);
 
/* Rotates the display */
void (*fb_rotate)(struct fb_info *info, int angle);
 
/* wait for blit idle, optional */
int (*fb_sync)(struct fb_info *info);
 
/drivers/include/linux/fence.h
79,6 → 79,8
unsigned long flags;
ktime_t timestamp;
int status;
struct list_head child_list;
struct list_head active_list;
};
 
enum fence_flag_bits {
292,7 → 294,7
if (WARN_ON(f1->context != f2->context))
return false;
 
return f1->seqno - f2->seqno < INT_MAX;
return (int)(f1->seqno - f2->seqno) > 0;
}
 
/**
/drivers/include/linux/file.h
12,6 → 12,11
struct file;
 
extern void fput(struct file *);
 
struct file_operations;
struct vfsmount;
struct dentry;
struct path;
struct fd {
struct file *file;
unsigned int flags;
/drivers/include/linux/firmware.h
13,6 → 13,10
struct firmware {
size_t size;
const u8 *data;
struct page **pages;
 
/* firmware loader private fields */
void *priv;
};
 
struct module;
/drivers/include/linux/gfp.h
8,6 → 8,11
 
struct vm_area_struct;
 
/*
* In case of changes, please don't forget to update
* include/trace/events/mmflags.h and tools/perf/builtin-kmem.c
*/
 
/* Plain integer GFP bitmasks. Do not use this directly. */
#define ___GFP_DMA 0x01u
#define ___GFP_HIGHMEM 0x02u
47,7 → 52,6
#define __GFP_DMA ((__force gfp_t)___GFP_DMA)
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* Page is movable */
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 
100,8 → 104,6
*
* __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
* This takes precedence over the __GFP_MEMALLOC flag if both are set.
*
* __GFP_NOACCOUNT ignores the accounting for kmemcg limit enforcement.
*/
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
254,8 → 256,9
#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
#define GFP_TRANSHUGE ((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN) & \
~__GFP_KSWAPD_RECLAIM)
~__GFP_RECLAIM)
 
 
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
309,7 → 312,7
* 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
* 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
*
* ZONES_SHIFT must be <= 2 on 32 bit platforms.
* GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
*/
 
#if 16 * ZONES_SHIFT > BITS_PER_LONG
/drivers/include/linux/intel-iommu.h
0,0 → 1,506
/*
* Copyright © 2006-2015, Intel Corporation.
*
* Authors: Ashok Raj <ashok.raj@intel.com>
* Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
* David Woodhouse <David.Woodhouse@intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
 
#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_
 
#include <linux/types.h>
//#include <linux/iova.h>
#include <linux/io.h>
#include <linux/idr.h>
#include <linux/dma_remapping.h>
//#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <asm/cacheflush.h>
//#include <asm/iommu.h>
 
/*
* Intel IOMMU register specification per version 1.0 public spec.
*/
 
#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
#define DMAR_GCMD_REG 0x18 /* Global command register */
#define DMAR_GSTS_REG 0x1c /* Global status register */
#define DMAR_RTADDR_REG 0x20 /* Root entry table */
#define DMAR_CCMD_REG 0x28 /* Context command reg */
#define DMAR_FSTS_REG 0x34 /* Fault Status register */
#define DMAR_FECTL_REG 0x38 /* Fault control register */
#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
#define DMAR_PRS_REG 0xdc /* Page request status register */
#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
 
#define OFFSET_STRIDE (9)
 
#ifdef CONFIG_64BIT
#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)
#else
static inline u64 dmar_readq(void __iomem *addr)
{
u32 lo, hi;
lo = readl(addr);
hi = readl(addr + 4);
return (((u64) hi) << 32) + lo;
}
 
static inline void dmar_writeq(void __iomem *addr, u64 val)
{
writel((u32)val, addr);
writel((u32)(val >> 32), addr + 4);
}
#endif
 
#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v) ((v) & 0x0f)
 
/*
* Decoding Capability Register
*/
#define cap_pi_support(c) (((c) >> 59) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1)
#define cap_write_drain(c) (((c) >> 54) & 1)
#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c) (((c) >> 39) & 1)
 
#define cap_super_page_val(c) (((c) >> 34) & 0xf)
#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
* OFFSET_STRIDE) + 21)
 
#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
 
#define cap_zlr(c) (((c) >> 22) & 1)
#define cap_isoch(c) (((c) >> 23) & 1)
#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c) (((c) >> 8) & 0x1f)
#define cap_caching_mode(c) (((c) >> 7) & 1)
#define cap_phmr(c) (((c) >> 6) & 1)
#define cap_plmr(c) (((c) >> 5) & 1)
#define cap_rwbf(c) (((c) >> 4) & 1)
#define cap_afl(c) (((c) >> 3) & 1)
#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
/*
* Extended Capability Register
*/
 
#define ecap_pasid(e) ((e >> 40) & 0x1)
#define ecap_pss(e) ((e >> 35) & 0x1f)
#define ecap_eafs(e) ((e >> 34) & 0x1)
#define ecap_nwfs(e) ((e >> 33) & 0x1)
#define ecap_srs(e) ((e >> 31) & 0x1)
#define ecap_ers(e) ((e >> 30) & 0x1)
#define ecap_prs(e) ((e >> 29) & 0x1)
#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
#define ecap_dis(e) ((e >> 27) & 0x1)
#define ecap_nest(e) ((e >> 26) & 0x1)
#define ecap_mts(e) ((e >> 25) & 0x1)
#define ecap_ecs(e) ((e >> 24) & 0x1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e) ((e) & 0x1)
#define ecap_qis(e) ((e) & 0x2)
#define ecap_pass_through(e) ((e >> 6) & 0x1)
#define ecap_eim_support(e) ((e >> 4) & 0x1)
#define ecap_ir_support(e) ((e >> 3) & 0x1)
#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
 
/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET 60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT (((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_TLB_MAX_SIZE (0x3f)
 
/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET 61
#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3)
#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3)
#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr) (addr)
#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
 
/* PMEN_REG */
#define DMA_PMEN_EPM (((u32)1)<<31)
#define DMA_PMEN_PRS (((u32)1)<<0)
 
/* GCMD_REG */
#define DMA_GCMD_TE (((u32)1) << 31)
#define DMA_GCMD_SRTP (((u32)1) << 30)
#define DMA_GCMD_SFL (((u32)1) << 29)
#define DMA_GCMD_EAFL (((u32)1) << 28)
#define DMA_GCMD_WBF (((u32)1) << 27)
#define DMA_GCMD_QIE (((u32)1) << 26)
#define DMA_GCMD_SIRTP (((u32)1) << 24)
#define DMA_GCMD_IRE (((u32) 1) << 25)
#define DMA_GCMD_CFI (((u32) 1) << 23)
 
/* GSTS_REG */
#define DMA_GSTS_TES (((u32)1) << 31)
#define DMA_GSTS_RTPS (((u32)1) << 30)
#define DMA_GSTS_FLS (((u32)1) << 29)
#define DMA_GSTS_AFLS (((u32)1) << 28)
#define DMA_GSTS_WBFS (((u32)1) << 27)
#define DMA_GSTS_QIES (((u32)1) << 26)
#define DMA_GSTS_IRTPS (((u32)1) << 24)
#define DMA_GSTS_IRES (((u32)1) << 25)
#define DMA_GSTS_CFIS (((u32)1) << 23)
 
/* DMA_RTADDR_REG */
#define DMA_RTADDR_RTT (((u64)1) << 11)
 
/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
 
/* FECTL_REG */
#define DMA_FECTL_IM (((u32)1) << 31)
 
/* FSTS_REG */
#define DMA_FSTS_PPF ((u32)2)
#define DMA_FSTS_PFO ((u32)1)
#define DMA_FSTS_IQE (1 << 4)
#define DMA_FSTS_ICE (1 << 5)
#define DMA_FSTS_ITE (1 << 6)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
 
/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u32)1) << 31)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
 
/* PRS_REG */
#define DMA_PRS_PPR ((u32)1)
 
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
cycles_t start_time = get_cycles(); \
while (1) { \
sts = op(iommu->reg + offset); \
if (cond) \
break; \
if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
panic("DMAR hardware is malfunctioning\n"); \
cpu_relax(); \
} \
} while (0)
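
A sketch of how this wait loop is typically driven (simplified, locking omitted; uses the GCMD/GSTS bit definitions above): write a command bit, then poll the status register until the hardware acknowledges it.

u32 sts;

/* request translation enable, then spin until the TES status bit is set */
writel(iommu->gcmd | DMA_GCMD_TE, iommu->reg + DMAR_GCMD_REG);
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_TES), sts);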
 
#define QI_LENGTH 256 /* queue length */
 
enum {
QI_FREE,
QI_IN_USE,
QI_DONE,
QI_ABORT
};
 
#define QI_CC_TYPE 0x1
#define QI_IOTLB_TYPE 0x2
#define QI_DIOTLB_TYPE 0x3
#define QI_IEC_TYPE 0x4
#define QI_IWD_TYPE 0x5
#define QI_EIOTLB_TYPE 0x6
#define QI_PC_TYPE 0x7
#define QI_DEIOTLB_TYPE 0x8
#define QI_PGRP_RESP_TYPE 0x9
#define QI_PSTRM_RESP_TYPE 0xa
 
#define QI_IEC_SELECTIVE (((u64)1) << 4)
#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
 
#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
 
#define QI_IOTLB_DID(did) (((u64)did) << 16)
#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
#define QI_IOTLB_AM(am) (((u8)am))
 
#define QI_CC_FM(fm) (((u64)fm) << 48)
#define QI_CC_SID(sid) (((u64)sid) << 32)
#define QI_CC_DID(did) (((u64)did) << 16)
#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
 
#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32
 
#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
#define QI_PC_DID(did) (((u64)did) << 16)
#define QI_PC_GRAN(gran) (((u64)gran) << 4)
 
#define QI_PC_ALL_PASIDS (QI_PC_TYPE | QI_PC_GRAN(0))
#define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1))
 
#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_GL(gl) (((u64)gl) << 7)
#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
#define QI_EIOTLB_AM(am) (((u64)am))
#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
#define QI_EIOTLB_DID(did) (((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
 
#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16)
#define QI_DEV_EIOTLB_MAX_INVS 32
 
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
#define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32)
#define QI_PGRP_RESP_CODE(res) ((u64)(res))
#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
#define QI_PGRP_DID(did) (((u64)(did)) << 16)
#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
 
#define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK)
#define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4)
#define QI_PSTRM_RESP_CODE(res) ((u64)(res))
#define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55)
#define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32)
#define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24)
#define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4)
 
#define QI_RESP_SUCCESS 0x0
#define QI_RESP_INVALID 0x1
#define QI_RESP_FAILURE 0xf
 
#define QI_GRAN_ALL_ALL 0
#define QI_GRAN_NONG_ALL 1
#define QI_GRAN_NONG_PASID 2
#define QI_GRAN_PSI_PASID 3
 
struct qi_desc {
u64 low, high;
};
 
struct q_inval {
raw_spinlock_t q_lock;
struct qi_desc *desc; /* invalidation queue */
int *desc_status; /* desc status */
int free_head; /* first free entry */
int free_tail; /* last free entry */
int free_cnt;
};
 
#ifdef CONFIG_IRQ_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
 
#define INTR_REMAP_TABLE_ENTRIES 65536
 
struct irq_domain;
 
struct ir_table {
struct irte *base;
unsigned long *bitmap;
};
#endif
 
struct iommu_flush {
void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
};
 
enum {
SR_DMAR_FECTL_REG,
SR_DMAR_FEDATA_REG,
SR_DMAR_FEADDR_REG,
SR_DMAR_FEUADDR_REG,
MAX_SR_DMAR_REGS
};
 
#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
 
struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;
 
struct intel_iommu {
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
u64 reg_phys; /* physical address of hw register set */
u64 reg_size; /* size of hw register set */
u64 cap;
u64 ecap;
u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
raw_spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */
int msagaw; /* max sagaw of this iommu */
unsigned int irq, pr_irq;
u16 segment; /* PCI segment# */
unsigned char name[13]; /* Device Name */
 
#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
struct dmar_domain ***domains; /* ptr to domains */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
 
struct iommu_flush flush;
#endif
#ifdef CONFIG_INTEL_IOMMU_SVM
/* These are large and need to be contiguous, so we allocate just
* one for now. We'll maybe want to rethink that if we truly give
* devices away to userspace processes (e.g. for DPDK) and don't
* want to trust that userspace will use *only* the PASID it was
* told to. But while it's all driver-arbitrated, we're fine. */
struct pasid_entry *pasid_table;
struct pasid_state_entry *pasid_state_table;
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
struct idr pasid_idr;
#endif
struct q_inval *qi; /* Queued invalidation info */
u32 *iommu_state; /* Store iommu states between suspend and resume.*/
 
#ifdef CONFIG_IRQ_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
struct irq_domain *ir_domain;
struct irq_domain *ir_msi_domain;
#endif
struct device *iommu_dev; /* IOMMU-sysfs device */
int node;
u32 flags; /* Software defined flags */
};
 
static inline void __iommu_flush_cache(
struct intel_iommu *iommu, void *addr, int size)
{
if (!ecap_coherent(iommu->ecap))
clflush_cache_range(addr, size);
}
 
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
 
extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void dmar_disable_qi(struct intel_iommu *iommu);
extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);
 
extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
u64 addr, unsigned mask);
 
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
extern int dmar_ir_support(void);
 
#ifdef CONFIG_INTEL_IOMMU_SVM
extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu);
extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
 
struct svm_dev_ops;
 
struct intel_svm_dev {
struct list_head list;
struct rcu_head rcu;
struct device *dev;
struct svm_dev_ops *ops;
int users;
u16 did;
u16 dev_iotlb:1;
u16 sid, qdep;
};
 
struct intel_svm {
struct mmu_notifier notifier;
struct mm_struct *mm;
struct intel_iommu *iommu;
int flags;
int pasid;
struct list_head devs;
};
 
extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
#endif
 
extern const struct attribute_group *intel_iommu_groups[];
 
#endif
/drivers/include/linux/io-mapping.h
0,0 → 1,171
/*
* Copyright © 2008 Keith Packard <keithp@keithp.com>
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
*/
 
#ifndef _LINUX_IO_MAPPING_H
#define _LINUX_IO_MAPPING_H
 
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <asm/page.h>
 
/*
* The io_mapping mechanism provides an abstraction for mapping
* individual pages from an io device to the CPU in an efficient fashion.
*
* See Documentation/io-mapping.txt
*/
 
#ifdef CONFIG_HAVE_ATOMIC_IOMAP
 
#include <asm/iomap.h>
 
struct io_mapping {
void *vaddr;
resource_size_t base;
unsigned long size;
};
 
/*
* For small address space machines, mapping large objects
* into the kernel virtual space isn't practical. Where
* available, use fixmap support to dynamically map pages
* of the object at run time.
*/
 
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
struct io_mapping *iomap;
 
iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
if (!iomap)
goto out_err;
 
iomap->vaddr = AllocKernelSpace(4096);
if (iomap->vaddr == NULL)
goto out_free;
 
iomap->base = base;
iomap->size = size;
 
return iomap;
 
out_free:
kfree(iomap);
out_err:
return NULL;
}
 
static inline void
io_mapping_free(struct io_mapping *mapping)
{
FreeKernelSpace(mapping->vaddr);
kfree(mapping);
}
 
/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
addr_t phys_addr;
 
BUG_ON(offset >= mapping->size);
phys_addr = (mapping->base + offset) & PAGE_MASK;
 
MapPage(mapping->vaddr, phys_addr, PG_WRITEC|PG_SW);
return mapping->vaddr;
}
 
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
MapPage(vaddr, 0, 0);
}
 
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
addr_t phys_addr;
 
BUG_ON(offset >= mapping->size);
phys_addr = (mapping->base + offset) & PAGE_MASK;
 
MapPage(mapping->vaddr, phys_addr, PG_WRITEC|PG_SW);
return mapping->vaddr;
}
 
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
MapPage(vaddr, 0, 0);
}
 
#else
 
#include <linux/uaccess.h>
 
/* this struct isn't actually defined anywhere */
struct io_mapping;
 
/* Create the io_mapping object*/
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
return (struct io_mapping __force *) ioremap_wc(base, size);
}
 
static inline void
io_mapping_free(struct io_mapping *mapping)
{
iounmap((void __force __iomem *) mapping);
}
 
/* Atomic map/unmap */
static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
preempt_disable();
pagefault_disable();
return ((char __force __iomem *) mapping) + offset;
}
 
static inline void
io_mapping_unmap_atomic(void __iomem *vaddr)
{
pagefault_enable();
preempt_enable();
}
 
/* Non-atomic map/unmap */
static inline void __iomem *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
return ((char __force __iomem *) mapping) + offset;
}
 
static inline void
io_mapping_unmap(void __iomem *vaddr)
{
}
 
#endif /* HAVE_ATOMIC_IOMAP */
 
#endif /* _LINUX_IO_MAPPING_H */
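
A usage sketch for this API (aperture_base, aperture_size and page_offset are hypothetical values): create the mapping once, map and unmap single pages around short critical sections, and free it on teardown.

struct io_mapping *map;
void __iomem *vaddr;

map = io_mapping_create_wc(aperture_base, aperture_size);
if (!map)
        return -ENOMEM;

vaddr = io_mapping_map_atomic_wc(map, page_offset); /* page-aligned offset */
/* ... write-combined access to a single page ... */
io_mapping_unmap_atomic(vaddr);

io_mapping_free(map);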
/drivers/include/linux/ioport.h
20,6 → 20,7
resource_size_t end;
const char *name;
unsigned long flags;
unsigned long desc;
struct resource *parent, *sibling, *child;
};
 
49,12 → 50,19
#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */
#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */
 
#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */
#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */
 
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
 
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
#define IORESOURCE_AUTO 0x40000000
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
 
/* I/O resource extended types */
#define IORESOURCE_SYSTEM_RAM (IORESOURCE_MEM|IORESOURCE_SYSRAM)
 
/* PnP IRQ specific bits (IORESOURCE_BITS) */
#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
#define IORESOURCE_IRQ_LOWEDGE (1<<1)
98,13 → 106,27
 
/* PCI ROM control bits (IORESOURCE_BITS) */
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */
#define IORESOURCE_ROM_SHADOW (1<<1) /* Use RAM image, not ROM BAR */
 
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
 
/*
* I/O Resource Descriptors
*
* Descriptors are used by walk_iomem_res_desc() and region_intersects()
* for searching a specific resource range in the iomem table. Assign
* a new descriptor when a resource range supports the search interfaces.
* Otherwise, resource.desc must be set to IORES_DESC_NONE (0).
*/
enum {
IORES_DESC_NONE = 0,
IORES_DESC_CRASH_KERNEL = 1,
IORES_DESC_ACPI_TABLES = 2,
IORES_DESC_ACPI_NV_STORAGE = 3,
IORES_DESC_PERSISTENT_MEMORY = 4,
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
};
 
/* helpers to define resources */
#define DEFINE_RES_NAMED(_start, _size, _name, _flags) \
113,6 → 135,7
.end = (_start) + (_size) - 1, \
.name = (_name), \
.flags = (_flags), \
.desc = IORES_DESC_NONE, \
}
 
#define DEFINE_RES_IO_NAMED(_start, _size, _name) \
149,6 → 172,7
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern int remove_resource(struct resource *old);
extern void arch_remove_reservations(struct resource *avail);
extern int allocate_resource(struct resource *root, struct resource *new,
resource_size_t size, resource_size_t min,
170,6 → 194,10
{
return res->flags & IORESOURCE_TYPE_BITS;
}
static inline unsigned long resource_ext_type(const struct resource *res)
{
return res->flags & IORESOURCE_EXT_TYPE_BITS;
}
/* True iff r1 completely contains r2 */
static inline bool resource_contains(struct resource *r1, struct resource *r2)
{
/drivers/include/linux/kernel.h
63,7 → 63,7
#define round_down(x, y) ((x) & ~__round_mask(x, y))
 
#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_UP_ULL(ll,d) \
({ unsigned long long _tmp = (ll)+(d)-1; do_div(_tmp, d); _tmp; })
 
788,64 → 788,6
})
 
 
static inline __must_check long __copy_to_user(void __user *to,
const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
*(u16 __force *)to = *(u16 *)from;
return 0;
case 4:
*(u32 __force *)to = *(u32 *)from;
return 0;
default:
break;
}
}
 
__builtin_memcpy((void __force *)to, from, n);
return 0;
}
 
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
unsigned long ret;
 
switch (n) {
case 1:
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
*(u16 __force *)to = *(u16 *)from;
return 0;
case 4:
*(u32 __force *)to = *(u32 *)from;
return 0;
default:
break;
}
}
__builtin_memcpy((void __force *)to, from, n);
}
 
static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
return __copy_from_user(to, from, n);
}
 
static inline long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
return __copy_to_user(to, from, n);
}
 
#define CAP_SYS_ADMIN 21
 
static inline bool capable(int cap)
861,14 → 803,8
 
typedef u64 async_cookie_t;
 
//#define iowrite32(v, addr) writel((v), (addr))
 
#define __init
 
#define CONFIG_PAGE_OFFSET 0
 
typedef long long __kernel_long_t;
typedef unsigned long long __kernel_ulong_t;
#define __kernel_long_t __kernel_long_t
 
#endif
/drivers/include/linux/kernfs.h
0,0 → 1,140
/*
* kernfs.h - pseudo filesystem decoupled from vfs locking
*
* This file is released under the GPLv2.
*/
 
#ifndef __LINUX_KERNFS_H
#define __LINUX_KERNFS_H
 
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/lockdep.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/wait.h>
 
struct file;
struct dentry;
struct iattr;
struct seq_file;
struct vm_area_struct;
struct super_block;
struct file_system_type;
 
struct kernfs_open_node;
struct kernfs_iattrs;
 
enum kernfs_node_type {
KERNFS_DIR = 0x0001,
KERNFS_FILE = 0x0002,
KERNFS_LINK = 0x0004,
};
 
#define KERNFS_TYPE_MASK 0x000f
#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
 
enum kernfs_node_flag {
KERNFS_ACTIVATED = 0x0010,
KERNFS_NS = 0x0020,
KERNFS_HAS_SEQ_SHOW = 0x0040,
KERNFS_HAS_MMAP = 0x0080,
KERNFS_LOCKDEP = 0x0100,
KERNFS_SUICIDAL = 0x0400,
KERNFS_SUICIDED = 0x0800,
KERNFS_EMPTY_DIR = 0x1000,
};
 
/* @flags for kernfs_create_root() */
enum kernfs_root_flag {
/*
* kernfs_nodes are created in the deactivated state and invisible.
* They require explicit kernfs_activate() to become visible. This
* can be used to make related nodes become visible atomically
* after all nodes are created successfully.
*/
KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001,
 
/*
* For regular files, if the opener has CAP_DAC_OVERRIDE, open(2)
* succeeds regardless of the RW permissions. sysfs had an extra
* layer of enforcement where open(2) fails with -EACCES regardless
* of CAP_DAC_OVERRIDE if the permission doesn't have the
* respective read or write access at all (none of S_IRUGO or
* S_IWUGO) or the respective operation isn't implemented. The
* following flag enables that behavior.
*/
KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK = 0x0002,
};
 
/* type-specific structures for kernfs_node union members */
struct kernfs_elem_dir {
unsigned long subdirs;
/* children rbtree starts here and goes through kn->rb */
struct rb_root children;
 
/*
* The kernfs hierarchy this directory belongs to. This fits
* better directly in kernfs_node but is here to save space.
*/
struct kernfs_root *root;
};
 
struct kernfs_elem_symlink {
struct kernfs_node *target_kn;
};
 
struct kernfs_elem_attr {
const struct kernfs_ops *ops;
struct kernfs_open_node *open;
loff_t size;
struct kernfs_node *notify_next; /* for kernfs_notify() */
};
 
/*
* kernfs_node - the building block of kernfs hierarchy. Each and every
* kernfs node is represented by single kernfs_node. Most fields are
* private to kernfs and shouldn't be accessed directly by kernfs users.
*
* As long as s_count reference is held, the kernfs_node itself is
* accessible. Dereferencing elem or any other outer entity requires
* active reference.
*/
struct kernfs_node {
atomic_t count;
atomic_t active;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
/*
* Use kernfs_get_parent() and kernfs_name/path() instead of
* accessing the following two fields directly. If the node is
* never moved to a different parent, it is safe to access the
* parent directly.
*/
struct kernfs_node *parent;
const char *name;
 
struct rb_node rb;
 
const void *ns; /* namespace tag */
unsigned int hash; /* ns + name hash */
union {
struct kernfs_elem_dir dir;
struct kernfs_elem_symlink symlink;
struct kernfs_elem_attr attr;
};
 
void *priv;
 
unsigned short flags;
umode_t mode;
unsigned int ino;
struct kernfs_iattrs *iattr;
};
 
 
#endif /* __LINUX_KERNFS_H */
/drivers/include/linux/lockdep.h
196,9 → 196,11
* We record lock dependency chains, so that we can cache them:
*/
struct lock_chain {
u8 irq_context;
u8 depth;
u16 base;
/* see BUILD_BUG_ON()s in lookup_chain_cache() */
unsigned int irq_context : 2,
depth : 6,
base : 24;
/* 4 byte hole */
struct hlist_node entry;
u64 chain_key;
};
261,7 → 263,6
/*
* Initialization, self-test and debugging-output methods:
*/
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
392,7 → 393,6
# define lockdep_set_current_reclaim_state(g) do { } while (0)
# define lockdep_clear_current_reclaim_state() do { } while (0)
# define lockdep_trace_alloc(g) do { } while (0)
# define lockdep_init() do { } while (0)
# define lockdep_info() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
/drivers/include/linux/mmdebug.h
9,8 → 9,7
struct mm_struct;
 
extern void dump_page(struct page *page, const char *reason);
extern void dump_page_badflags(struct page *page, const char *reason,
unsigned long badflags);
extern void __dump_page(struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
 
/drivers/include/linux/pci-dma-compat.h
0,0 → 1,131
/* include this file if the platform implements the dma_ DMA Mapping API
* and wants to provide the pci_ DMA Mapping API in terms of it */
 
#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
#define _ASM_GENERIC_PCI_DMA_COMPAT_H
 
#include <linux/dma-mapping.h>
 
/* This defines the direction arg to the DMA mapping routines. */
#define PCI_DMA_BIDIRECTIONAL 0
#define PCI_DMA_TODEVICE 1
#define PCI_DMA_FROMDEVICE 2
#define PCI_DMA_NONE 3
 
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
}
 
static inline void *
pci_zalloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle)
{
return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
size, dma_handle, GFP_ATOMIC);
}
 
static inline void
pci_free_consistent(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
}
 
static inline dma_addr_t
pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
}
 
static inline void
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
size_t size, int direction)
{
dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
}
 
static inline dma_addr_t
pci_map_page(struct pci_dev *hwdev, struct page *page,
unsigned long offset, size_t size, int direction)
{
return (dma_addr_t)( (offset)+page_to_phys(page));
}
 
static inline void
pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
size_t size, int direction)
{
 
}
 
static inline int
pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
}
 
static inline void
pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
int nents, int direction)
{
dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
}
 
static inline void
pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}
 
static inline void
pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
size_t size, int direction)
{
dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
}
 
static inline void
pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}
 
static inline void
pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
int nelems, int direction)
{
dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
}
 
static inline int
pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
{
return dma_mapping_error(&pdev->dev, dma_addr);
}
 
#ifdef CONFIG_PCI
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
return 0;
}
 
#else
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
unsigned int size)
{ return -EIO; }
static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
unsigned long mask)
{ return -EIO; }
#endif
 
#endif
/drivers/include/linux/pci.h
742,9 → 742,26
.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
 
enum {
PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */
PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */
PCI_PROBE_ONLY = 0x00000004, /* use existing setup */
PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */
PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */
PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */
PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */
};
 
/* these external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
 
extern unsigned int pci_flags;
 
static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }
 
void pcie_bus_configure_settings(struct pci_bus *bus);
 
enum pcie_bus_config_types {
766,6 → 783,7
int no_pci_devices(void);
 
void pcibios_resource_survey_bus(struct pci_bus *bus);
void pcibios_bus_add_device(struct pci_dev *pdev);
void pcibios_add_bus(struct pci_bus *bus);
void pcibios_remove_bus(struct pci_bus *bus);
void pcibios_fixup_bus(struct pci_bus *);
1006,8 → 1024,6
bool pci_intx_mask_supported(struct pci_dev *dev);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
int pci_wait_for_pending_transaction(struct pci_dev *dev);
int pcix_get_max_mmrbc(struct pci_dev *dev);
1100,6 → 1116,7
/* Vital product data routines */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
int pci_set_vpd_size(struct pci_dev *dev, size_t len);
 
/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
1231,6 → 1248,7
 
int pci_set_vga_state(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
 
/* kmem_cache style wrapper around pci_alloc_consistent() */
 
#include <linux/pci-dma.h>
1398,6 → 1416,11
 
#else /* CONFIG_PCI is not enabled */
 
static inline void pci_set_flags(int flags) { }
static inline void pci_add_flags(int flags) { }
static inline void pci_clear_flags(int flags) { }
static inline int pci_has_flag(int flag) { return 0; }
 
/*
* If the system does not have PCI, clearly these return errors. Define
* these as simple inline functions to avoid hair in drivers.
1437,16 → 1460,6
static inline void pci_set_master(struct pci_dev *dev) { }
static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
unsigned int size)
{ return -EIO; }
static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
unsigned long mask)
{ return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{ return -EBUSY; }
static inline int __pci_register_driver(struct pci_driver *drv,
1508,6 → 1521,10
 
#include <asm/pci.h>
 
#ifndef pci_root_bus_fwnode
#define pci_root_bus_fwnode(bus) NULL
#endif
 
/* these helpers provide future and backwards compatibility
* for accessing popular PCI BAR info */
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
1731,6 → 1748,8
 
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
void pci_disable_sriov(struct pci_dev *dev);
int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset);
void pci_iov_remove_virtfn(struct pci_dev *dev, int id, int reset);
int pci_num_vf(struct pci_dev *dev);
int pci_vfs_assigned(struct pci_dev *dev);
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs);
1747,6 → 1766,12
}
static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{ return -ENODEV; }
static inline int pci_iov_add_virtfn(struct pci_dev *dev, int id, int reset)
{
return -ENOSYS;
}
static inline void pci_iov_remove_virtfn(struct pci_dev *dev,
int id, int reset) { }
static inline void pci_disable_sriov(struct pci_dev *dev) { }
static inline int pci_num_vf(struct pci_dev *dev) { return 0; }
static inline int pci_vfs_assigned(struct pci_dev *dev)
1827,12 → 1852,13
#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
 
/* Small Resource Data Type Tag Item Names */
#define PCI_VPD_STIN_END 0x78 /* End */
#define PCI_VPD_STIN_END 0x0f /* End */
 
#define PCI_VPD_SRDT_END PCI_VPD_STIN_END
#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
 
#define PCI_VPD_SRDT_TIN_MASK 0x78
#define PCI_VPD_SRDT_LEN_MASK 0x07
#define PCI_VPD_LRDT_TIN_MASK 0x7f
 
#define PCI_VPD_LRDT_TAG_SIZE 3
#define PCI_VPD_SRDT_TAG_SIZE 1
1856,6 → 1882,17
}
 
/**
* pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
* @lrdt: Pointer to the beginning of the Large Resource Data Type tag
*
* Returns the extracted Large Resource Data Type Tag item.
*/
static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
{
return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
}
 
/**
* pci_vpd_srdt_size - Extracts the Small Resource Data Type length
* @lrdt: Pointer to the beginning of the Small Resource Data Type tag
*
1867,6 → 1904,17
}
 
/**
* pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
* @srdt: Pointer to the beginning of the Small Resource Data Type tag
*
* Returns the extracted Small Resource Data Type Tag Item.
*/
static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
{
return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
}
 
/**
* pci_vpd_info_field_size - Extracts the information field length
* @lrdt: Pointer to the beginning of an information field header
*
1983,6 → 2031,9
return bus->self && bus->self->ari_enabled;
}
 
/* provide the legacy pci_dma_* API */
#include <linux/pci-dma-compat.h>
 
typedef struct
{
struct list_head link;
/drivers/include/linux/pci_ids.h
110,6 → 110,7
#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330
#define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe
#define PCI_CLASS_SERIAL_FIBER 0x0c04
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
 
2506,6 → 2507,10
 
#define PCI_VENDOR_ID_AZWAVE 0x1a3b
 
#define PCI_VENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_SUBVENDOR_ID_REDHAT_QUMRANET 0x1af4
#define PCI_SUBDEVICE_ID_QEMU 0x1100
 
#define PCI_VENDOR_ID_ASMEDIA 0x1b21
 
#define PCI_VENDOR_ID_CIRCUITCO 0x1cc8
/drivers/include/linux/poison.h
30,7 → 30,11
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
 
/********** mm/debug-pagealloc.c **********/
#ifdef CONFIG_PAGE_POISONING_ZERO
#define PAGE_POISON 0x00
#else
#define PAGE_POISON 0xaa
#endif
 
/********** mm/page_alloc.c ************/
 
/drivers/include/linux/printk.h
242,10 → 242,10
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
__ratelimit(&_rs)) \
__dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
#define pr_debug_ratelimited(fmt, ...) \
/drivers/include/linux/pwm.h
6,6 → 6,7
//#include <linux/of.h>
 
struct device;
struct device_node;
struct pwm_device;
struct seq_file;
 
223,6 → 224,11
return ERR_PTR(-ENODEV);
}
 
static inline struct pwm_device *of_pwm_get(struct device_node *np,
const char *con_id)
{
return ERR_PTR(-ENODEV);
}
 
static inline void pwm_put(struct pwm_device *pwm)
{
234,6 → 240,12
return ERR_PTR(-ENODEV);
}
 
static inline struct pwm_device *devm_of_pwm_get(struct device *dev,
struct device_node *np,
const char *con_id)
{
return ERR_PTR(-ENODEV);
}
 
static inline void devm_pwm_put(struct device *dev, struct pwm_device *pwm)
{
/drivers/include/linux/rculist.h
319,6 → 319,27
})
 
/**
* list_next_or_null_rcu - get the first element from a list
* @head: the head for the list.
* @ptr: the list head to take the next element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note that if the ptr is at the end of the list, NULL is returned.
*
* This primitive may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
struct list_head *__head = (head); \
struct list_head *__ptr = (ptr); \
struct list_head *__next = READ_ONCE(__ptr->next); \
likely(__next != __head) ? list_entry_rcu(__next, type, \
member) : NULL; \
})
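
A reader-side sketch for the new helper (struct and list names are hypothetical): the lookup is done under rcu_read_lock(), and NULL signals that @ptr was the last element.

struct item {
        int val;
        struct list_head node;
};

static int peek_next_val(struct list_head *item_list, struct item *cur)
{
        struct item *next;
        int val = -1; /* -1: 'cur' was the last element */

        rcu_read_lock();
        next = list_next_or_null_rcu(item_list, &cur->node, struct item, node);
        if (next)
                val = next->val;
        rcu_read_unlock();

        return val;
}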
 
/**
* list_for_each_entry_rcu - iterate over rcu list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
/drivers/include/linux/rcupdate.h
294,9 → 294,7
void rcu_sched_qs(void);
void rcu_bh_qs(void);
void rcu_check_callbacks(int user);
struct notifier_block;
int rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu);
void rcu_report_dead(unsigned int cpu);
 
#ifndef CONFIG_TINY_RCU
void rcu_end_inkernel_boot(void);
322,8 → 320,6
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
#endif /* CONFIG_NO_HZ_FULL */
 
#ifdef CONFIG_RCU_NOCB_CPU
/drivers/include/linux/slab.h
20,7 → 20,7
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
#define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
92,6 → 92,12
# define SLAB_ACCOUNT 0x00000000UL
#endif
 
#ifdef CONFIG_KASAN
#define SLAB_KASAN 0x08000000UL
#else
#define SLAB_KASAN 0x00000000UL
#endif
 
/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/drivers/include/linux/spinlock.h
51,6 → 51,7
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
/drivers/include/linux/stat.h
0,0 → 1,37
#ifndef _LINUX_STAT_H
#define _LINUX_STAT_H
 
 
#include <asm/stat.h>
#include <uapi/linux/stat.h>
 
#define S_IRWXUGO (S_IRWXU|S_IRWXG|S_IRWXO)
#define S_IALLUGO (S_ISUID|S_ISGID|S_ISVTX|S_IRWXUGO)
#define S_IRUGO (S_IRUSR|S_IRGRP|S_IROTH)
#define S_IWUGO (S_IWUSR|S_IWGRP|S_IWOTH)
#define S_IXUGO (S_IXUSR|S_IXGRP|S_IXOTH)
 
#define UTIME_NOW ((1l << 30) - 1l)
#define UTIME_OMIT ((1l << 30) - 2l)
 
#include <linux/types.h>
#include <linux/time.h>
#include <linux/uidgid.h>
 
struct kstat {
u64 ino;
dev_t dev;
umode_t mode;
unsigned int nlink;
kuid_t uid;
kgid_t gid;
dev_t rdev;
loff_t size;
struct timespec atime;
struct timespec mtime;
struct timespec ctime;
unsigned long blksize;
unsigned long long blocks;
};
 
#endif
/drivers/include/linux/string.h
128,8 → 128,14
extern void argv_free(char **argv);
 
extern bool sysfs_streq(const char *s1, const char *s2);
extern int strtobool(const char *s, bool *res);
extern int kstrtobool(const char *s, bool *res);
static inline int strtobool(const char *s, bool *res)
{
return kstrtobool(s, res);
}
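/*
 * Usage sketch (illustration only): both helpers parse "y"/"n"/"1"/"0"
 * style strings and return 0 on success; the "enable" variable and the
 * input string are hypothetical.
 *
 *	bool enable;
 *
 *	if (kstrtobool("1", &enable) == 0 && enable)
 *		pr_info("feature enabled\n");
 */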
 
int match_string(const char * const *array, size_t n, const char *string);
 
#ifdef CONFIG_BINARY_PRINTF
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
/drivers/include/linux/sysfs.h
202,7 → 202,11
{
}
 
#define sysfs_create_link(kobj,target, name) (0)
static inline int sysfs_create_link(struct kobject *kobj,
struct kobject *target, const char *name)
{
return 0;
}
 
static inline int sysfs_create_link_nowarn(struct kobject *kobj,
struct kobject *target,
211,7 → 215,9
return 0;
}
 
#define sysfs_remove_link(kobj, name)
static inline void sysfs_remove_link(struct kobject *kobj, const char *name)
{
}
 
static inline int sysfs_rename_link_ns(struct kobject *k, struct kobject *t,
const char *old_name,
/drivers/include/linux/thread_info.h
0,0 → 1,17
/* thread_info.h: common low-level thread information accessors
*
* Copyright (C) 2002 David Howells (dhowells@redhat.com)
* - Incorporating suggestions made by Linus Torvalds
*/
 
#ifndef _LINUX_THREAD_INFO_H
#define _LINUX_THREAD_INFO_H
 
#include <linux/types.h>
#include <linux/bug.h>
 
struct timespec;
struct compat_timespec;
 
 
#endif /* _LINUX_THREAD_INFO_H */
/drivers/include/linux/uaccess.h
2,6 → 2,7
#define __LINUX_UACCESS_H__
 
#include <linux/sched.h>
#include <asm/uaccess.h>
/*
* These routines enable/disable the pagefault handler. If disabled, it will
* not take any locks and go straight to the fixup table.
16,5 → 17,20
static inline void pagefault_enable(void)
{
}
#ifndef ARCH_HAS_NOCACHE_UACCESS
 
static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
const void __user *from, unsigned long n)
{
return __copy_from_user_inatomic(to, from, n);
}
 
static inline unsigned long __copy_from_user_nocache(void *to,
const void __user *from, unsigned long n)
{
return __copy_from_user(to, from, n);
}
 
#endif /* ARCH_HAS_NOCACHE_UACCESS */
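/*
 * Usage sketch (illustration only): the _inatomic variants are intended
 * to run with page faults disabled; dst, src and len are hypothetical.
 *
 *	unsigned long ret;
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic_nocache(dst, src, len);
 *	pagefault_enable();
 */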
 
#endif /* __LINUX_UACCESS_H__ */
/drivers/include/linux/uidgid.h
0,0 → 1,27
#ifndef _LINUX_UIDGID_H
#define _LINUX_UIDGID_H
 
/*
 * A set of types representing the kernel-internal uids and gids.
*
* The types defined in this header allow distinguishing which uids and gids in
* the kernel are values used by userspace and which uid and gid values are
* the internal kernel values. With the addition of user namespaces the values
* can be different. Using the type system makes it possible for the compiler
* to detect when we overlook these differences.
*
*/
#include <linux/types.h>
typedef struct {
uid_t val;
} kuid_t;
 
 
typedef struct {
gid_t val;
} kgid_t;
 
#define KUIDT_INIT(value) (kuid_t){ value }
#define KGIDT_INIT(value) (kgid_t){ value }
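/*
 * Usage sketch (illustration only): in this trimmed header kuid_t is a
 * plain wrapper, so code compares the .val member directly.
 *
 *	kuid_t uid = KUIDT_INIT(0);
 *
 *	if (uid.val == 0)
 *		pr_debug("running as root\n");
 */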
 
#endif /* _LINUX_UIDGID_H */
/drivers/include/linux/unaligned/access_ok.h
4,62 → 4,62
#include <linux/kernel.h>
#include <asm/byteorder.h>
 
static inline u16 get_unaligned_le16(const void *p)
static __always_inline u16 get_unaligned_le16(const void *p)
{
return le16_to_cpup((__le16 *)p);
}
 
static inline u32 get_unaligned_le32(const void *p)
static __always_inline u32 get_unaligned_le32(const void *p)
{
return le32_to_cpup((__le32 *)p);
}
 
static inline u64 get_unaligned_le64(const void *p)
static __always_inline u64 get_unaligned_le64(const void *p)
{
return le64_to_cpup((__le64 *)p);
}
 
static inline u16 get_unaligned_be16(const void *p)
static __always_inline u16 get_unaligned_be16(const void *p)
{
return be16_to_cpup((__be16 *)p);
}
 
static inline u32 get_unaligned_be32(const void *p)
static __always_inline u32 get_unaligned_be32(const void *p)
{
return be32_to_cpup((__be32 *)p);
}
 
static inline u64 get_unaligned_be64(const void *p)
static __always_inline u64 get_unaligned_be64(const void *p)
{
return be64_to_cpup((__be64 *)p);
}
 
static inline void put_unaligned_le16(u16 val, void *p)
static __always_inline void put_unaligned_le16(u16 val, void *p)
{
*((__le16 *)p) = cpu_to_le16(val);
}
 
static inline void put_unaligned_le32(u32 val, void *p)
static __always_inline void put_unaligned_le32(u32 val, void *p)
{
*((__le32 *)p) = cpu_to_le32(val);
}
 
static inline void put_unaligned_le64(u64 val, void *p)
static __always_inline void put_unaligned_le64(u64 val, void *p)
{
*((__le64 *)p) = cpu_to_le64(val);
}
 
static inline void put_unaligned_be16(u16 val, void *p)
static __always_inline void put_unaligned_be16(u16 val, void *p)
{
*((__be16 *)p) = cpu_to_be16(val);
}
 
static inline void put_unaligned_be32(u32 val, void *p)
static __always_inline void put_unaligned_be32(u32 val, void *p)
{
*((__be32 *)p) = cpu_to_be32(val);
}
 
static inline void put_unaligned_be64(u64 val, void *p)
static __always_inline void put_unaligned_be64(u64 val, void *p)
{
*((__be64 *)p) = cpu_to_be64(val);
}
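/*
 * Usage sketch (illustration only): packing and unpacking fixed-endian
 * fields in a byte buffer; the layout below is hypothetical.
 *
 *	u8 buf[6];
 *	u32 id;
 *	u16 len;
 *
 *	put_unaligned_le32(0x12345678, &buf[0]);
 *	put_unaligned_be16(0xabcd, &buf[4]);
 *
 *	id  = get_unaligned_le32(&buf[0]);
 *	len = get_unaligned_be16(&buf[4]);
 */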
/drivers/include/linux/vga_switcheroo.h
0,0 → 1,200
/*
 * vga_switcheroo.h - Support for laptops with dual GPUs using one set of outputs
*
* Copyright (c) 2010 Red Hat Inc.
* Author : Dave Airlie <airlied@redhat.com>
*
* Copyright (c) 2015 Lukas Wunner <lukas@wunner.de>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
*
*/
 
#ifndef _LINUX_VGA_SWITCHEROO_H_
#define _LINUX_VGA_SWITCHEROO_H_
 
#include <linux/fb.h>
 
struct pci_dev;
 
/**
* enum vga_switcheroo_handler_flags_t - handler flags bitmask
* @VGA_SWITCHEROO_CAN_SWITCH_DDC: whether the handler is able to switch the
* DDC lines separately. This signals to clients that they should call
* drm_get_edid_switcheroo() to probe the EDID
* @VGA_SWITCHEROO_NEEDS_EDP_CONFIG: whether the handler is unable to switch
* the AUX channel separately. This signals to clients that the active
* GPU needs to train the link and communicate the link parameters to the
* inactive GPU (mediated by vga_switcheroo). The inactive GPU may then
* skip the AUX handshake and set up its output with these pre-calibrated
* values (DisplayPort specification v1.1a, section 2.5.3.3)
*
* Handler flags bitmask. Used by handlers to declare their capabilities upon
* registering with vga_switcheroo.
*/
enum vga_switcheroo_handler_flags_t {
VGA_SWITCHEROO_CAN_SWITCH_DDC = (1 << 0),
VGA_SWITCHEROO_NEEDS_EDP_CONFIG = (1 << 1),
};
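/*
 * Usage sketch (illustration only): a DRM connector driver could consult
 * the handler flags before deciding how to fetch the EDID. The connector
 * and adapter variables are hypothetical, and drm_get_edid_switcheroo()
 * is assumed to be available in this tree.
 *
 *	if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
 *		edid = drm_get_edid_switcheroo(connector, adapter);
 *	else
 *		edid = drm_get_edid(connector, adapter);
 */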
 
/**
* enum vga_switcheroo_state - client power state
* @VGA_SWITCHEROO_OFF: off
* @VGA_SWITCHEROO_ON: on
* @VGA_SWITCHEROO_NOT_FOUND: client has not registered with vga_switcheroo.
* Only used in vga_switcheroo_get_client_state() which in turn is only
* called from hda_intel.c
*
* Client power state.
*/
enum vga_switcheroo_state {
VGA_SWITCHEROO_OFF,
VGA_SWITCHEROO_ON,
/* below are referred only from vga_switcheroo_get_client_state() */
VGA_SWITCHEROO_NOT_FOUND,
};
 
/**
* enum vga_switcheroo_client_id - client identifier
* @VGA_SWITCHEROO_UNKNOWN_ID: initial identifier assigned to vga clients.
* Determining the id requires the handler, so GPUs are given their
* true id in a delayed fashion in vga_switcheroo_enable()
* @VGA_SWITCHEROO_IGD: integrated graphics device
* @VGA_SWITCHEROO_DIS: discrete graphics device
* @VGA_SWITCHEROO_MAX_CLIENTS: currently no more than two GPUs are supported
*
 * Client identifier. Audio clients use the same identifier with bit 0x100 set.
*/
enum vga_switcheroo_client_id {
VGA_SWITCHEROO_UNKNOWN_ID = -1,
VGA_SWITCHEROO_IGD,
VGA_SWITCHEROO_DIS,
VGA_SWITCHEROO_MAX_CLIENTS,
};
 
/**
* struct vga_switcheroo_handler - handler callbacks
* @init: initialize handler.
* Optional. This gets called when vga_switcheroo is enabled, i.e. when
* two vga clients have registered. It allows the handler to perform
* some delayed initialization that depends on the existence of the
* vga clients. Currently only the radeon and amdgpu drivers use this.
* The return value is ignored
* @switchto: switch outputs to given client.
* Mandatory. For muxless machines this should be a no-op. Returning 0
* denotes success, anything else failure (in which case the switch is
* aborted)
* @switch_ddc: switch DDC lines to given client.
* Optional. Should return the previous DDC owner on success or a
* negative int on failure
* @power_state: cut or reinstate power of given client.
* Optional. The return value is ignored
* @get_client_id: determine if given pci device is integrated or discrete GPU.
* Mandatory
*
* Handler callbacks. The multiplexer itself. The @switchto and @get_client_id
* methods are mandatory, all others may be set to NULL.
*/
struct vga_switcheroo_handler {
int (*init)(void);
int (*switchto)(enum vga_switcheroo_client_id id);
int (*switch_ddc)(enum vga_switcheroo_client_id id);
int (*power_state)(enum vga_switcheroo_client_id id,
enum vga_switcheroo_state state);
enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev);
};
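/*
 * Handler sketch (illustration only): a muxless platform handler fills in
 * only the mandatory callbacks and registers with no special flags. All
 * foo_ names are hypothetical.
 *
 *	static int foo_switchto(enum vga_switcheroo_client_id id)
 *	{
 *		return 0;
 *	}
 *
 *	static enum vga_switcheroo_client_id
 *	foo_get_client_id(struct pci_dev *pdev)
 *	{
 *		return pdev->vendor == PCI_VENDOR_ID_INTEL ?
 *			VGA_SWITCHEROO_IGD : VGA_SWITCHEROO_DIS;
 *	}
 *
 *	static const struct vga_switcheroo_handler foo_handler = {
 *		.switchto	= foo_switchto,
 *		.get_client_id	= foo_get_client_id,
 *	};
 *
 *	vga_switcheroo_register_handler(&foo_handler, 0);
 */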
 
/**
* struct vga_switcheroo_client_ops - client callbacks
* @set_gpu_state: do the equivalent of suspend/resume for the card.
* Mandatory. This should not cut power to the discrete GPU,
* which is the job of the handler
* @reprobe: poll outputs.
* Optional. This gets called after waking the GPU and switching
* the outputs to it
* @can_switch: check if the device is in a position to switch now.
* Mandatory. The client should return false if a user space process
* has one of its device files open
*
* Client callbacks. A client can be either a GPU or an audio device on a GPU.
* The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
* set to NULL. For audio clients, the @reprobe member is bogus.
*/
struct vga_switcheroo_client_ops {
void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
void (*reprobe)(struct pci_dev *dev);
bool (*can_switch)(struct pci_dev *dev);
};
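/*
 * Client sketch (illustration only): a GPU driver registers its PCI device
 * together with its callbacks; all foo_ names are hypothetical and pdev is
 * the driver's PCI device.
 *
 *	static void foo_set_gpu_state(struct pci_dev *pdev,
 *				      enum vga_switcheroo_state state)
 *	{
 *		pr_debug("gpu %s\n",
 *			 state == VGA_SWITCHEROO_ON ? "on" : "off");
 *	}
 *
 *	static bool foo_can_switch(struct pci_dev *pdev)
 *	{
 *		return true;
 *	}
 *
 *	static const struct vga_switcheroo_client_ops foo_client_ops = {
 *		.set_gpu_state	= foo_set_gpu_state,
 *		.can_switch	= foo_can_switch,
 *	};
 *
 *	vga_switcheroo_register_client(pdev, &foo_client_ops, false);
 */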
 
#if defined(CONFIG_VGA_SWITCHEROO)
void vga_switcheroo_unregister_client(struct pci_dev *dev);
int vga_switcheroo_register_client(struct pci_dev *dev,
const struct vga_switcheroo_client_ops *ops,
bool driver_power_control);
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
enum vga_switcheroo_client_id id);
 
void vga_switcheroo_client_fb_set(struct pci_dev *dev,
struct fb_info *info);
 
int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
enum vga_switcheroo_handler_flags_t handler_flags);
void vga_switcheroo_unregister_handler(void);
enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void);
int vga_switcheroo_lock_ddc(struct pci_dev *pdev);
int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
 
int vga_switcheroo_process_delayed_switch(void);
 
enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
 
void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
 
int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
#else
 
static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
static inline int vga_switcheroo_register_client(struct pci_dev *dev,
const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
enum vga_switcheroo_handler_flags_t handler_flags) { return 0; }
static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
enum vga_switcheroo_client_id id) { return 0; }
static inline void vga_switcheroo_unregister_handler(void) {}
static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; }
static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
 
static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
 
static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
 
#endif
#endif /* _LINUX_VGA_SWITCHEROO_H_ */
/drivers/include/linux/vmalloc.h
4,6 → 4,7
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <asm/page.h> /* pgprot_t */
#include <linux/rbtree.h>
 
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
/drivers/include/linux/wait.h
312,6 → 312,8
// wait_queue_head_t wait;
//};
 
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 
 
/drivers/include/linux/workqueue.h
11,7 → 11,7
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
 
struct workqueue_struct;
 
239,10 → 239,20
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
 
bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
int queue_delayed_work(struct workqueue_struct *wq,
bool queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
extern bool cancel_work_sync(struct work_struct *work);
extern bool cancel_delayed_work(struct delayed_work *dwork);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 
bool schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
static inline bool mod_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork,
unsigned long delay)
{
return queue_delayed_work(wq, dwork, delay);
}
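/*
 * Usage sketch (illustration only): queueing, rescheduling and cancelling
 * a delayed work item. DECLARE_DELAYED_WORK(), system_wq and
 * msecs_to_jiffies() are assumed to be available in this port; note that
 * mod_delayed_work() above simply forwards to queue_delayed_work() here.
 *
 *	static void foo_timeout(struct work_struct *work) { }
 *	static DECLARE_DELAYED_WORK(foo_dwork, foo_timeout);
 *
 *	queue_delayed_work(system_wq, &foo_dwork, msecs_to_jiffies(100));
 *	mod_delayed_work(system_wq, &foo_dwork, msecs_to_jiffies(500));
 *	cancel_delayed_work_sync(&foo_dwork);
 */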
 
 
#define INIT_WORK(_work, _func) \