Subversion Repositories Kolibri OS

Compare Revisions

Rev 5055 → Rev 5056

/drivers/include/linux/backlight.h
7,4 → 7,36
 
#ifndef _LINUX_BACKLIGHT_H
#define _LINUX_BACKLIGHT_H
/* Notes on locking:
*
* backlight_device->ops_lock is an internal backlight lock protecting the
* ops pointer and no code outside the core should need to touch it.
*
* Access to update_status() is serialised by the update_lock mutex since
* most drivers seem to need this and historically get it wrong.
*
* Most drivers don't need locking on their get_brightness() method.
* If yours does, you need to implement it in the driver. You can use the
* update_lock mutex if appropriate.
*
* Any other use of the locks below is probably wrong.
*/
 
enum backlight_update_reason {
BACKLIGHT_UPDATE_HOTKEY,
BACKLIGHT_UPDATE_SYSFS,
};
 
enum backlight_type {
BACKLIGHT_RAW = 1,
BACKLIGHT_PLATFORM,
BACKLIGHT_FIRMWARE,
BACKLIGHT_TYPE_MAX,
};
 
enum backlight_notification {
BACKLIGHT_REGISTERED,
BACKLIGHT_UNREGISTERED,
};
 
#endif
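
The locking notes above leave get_brightness() serialisation to the driver; a minimal sketch of one way to follow them, reusing update_lock as the notes permit (mydrv_read_hw() is a hypothetical register read, not part of this tree):

static int mydrv_read_hw(struct backlight_device *bd);	/* hypothetical */

static int mydrv_get_brightness(struct backlight_device *bd)
{
	int level;

	/* The note allows borrowing update_lock when get_brightness()
	 * really needs serialising against update_status(). */
	mutex_lock(&bd->update_lock);
	level = mydrv_read_hw(bd);
	mutex_unlock(&bd->update_lock);
	return level;
}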
/drivers/include/linux/bitmap.h
88,32 → 88,32
* lib/bitmap.c provides these functions:
*/
 
extern int __bitmap_empty(const unsigned long *bitmap, int bits);
extern int __bitmap_full(const unsigned long *bitmap, int bits);
extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits);
extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits);
extern int __bitmap_equal(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src,
int bits);
unsigned int nbits);
extern void __bitmap_shift_right(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern void __bitmap_shift_left(unsigned long *dst,
const unsigned long *src, int shift, int bits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
 
extern void bitmap_set(unsigned long *map, int i, int len);
extern void bitmap_clear(unsigned long *map, int start, int nr);
extern void bitmap_set(unsigned long *map, unsigned int start, int len);
extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,
140,9 → 140,9
const unsigned long *relmap, int bits);
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
int sz, int bits);
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
 
188,15 → 188,15
}
 
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & *src2) != 0;
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_and(dst, src1, src2, nbits);
}
 
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 | *src2;
205,7 → 205,7
}
 
static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 ^ *src2;
214,24 → 214,24
}
 
static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return (*dst = *src1 & ~(*src2)) != 0;
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
return __bitmap_andnot(dst, src1, src2, nbits);
}
 
static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
int nbits)
unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits);
*dst = ~(*src);
else
__bitmap_complement(dst, src, nbits);
}
 
static inline int bitmap_equal(const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
240,7 → 240,7
}
 
static inline int bitmap_intersects(const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
249,7 → 249,7
}
 
static inline int bitmap_subset(const unsigned long *src1,
const unsigned long *src2, int nbits)
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
257,7 → 257,7
return __bitmap_subset(src1, src2, nbits);
}
 
static inline int bitmap_empty(const unsigned long *src, int nbits)
static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
265,7 → 265,7
return __bitmap_empty(src, nbits);
}
 
static inline int bitmap_full(const unsigned long *src, int nbits)
static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
273,7 → 273,7
return __bitmap_full(src, nbits);
}
 
static inline int bitmap_weight(const unsigned long *src, int nbits)
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
284,7 → 284,7
const unsigned long *src, int n, int nbits)
{
if (small_const_nbits(nbits))
*dst = *src >> n;
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n;
else
__bitmap_shift_right(dst, src, n, nbits);
}
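
Most hunks above only widen nbits/start to unsigned and mask the final word; the calling pattern is unchanged. A minimal usage sketch (all names hypothetical):

/* Sketch: a 128-bit map, set a range, count the set bits. */
static DECLARE_BITMAP(map, 128);

static void bitmap_demo(void)
{
	bitmap_zero(map, 128);
	bitmap_set(map, 8, 16);			/* sets bits 8..23 */
	BUG_ON(bitmap_weight(map, 128) != 16);
}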
/drivers/include/linux/bitops.h
4,12 → 4,23
 
#ifdef __KERNEL__
#define BIT(nr) (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
#define BITS_PER_BYTE 8
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#endif
 
/*
* Create a contiguous bitmask starting at bit position @l and ending at
* position @h. For example
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
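
A quick sanity check of the new macros by direct expansion (values computed here, not taken from the commit):

/*
 * GENMASK(h, l) sets bits l..h inclusive:
 *   GENMASK(7, 4)       == 0x000000f0
 *   GENMASK(31, 21)     == 0xffe00000
 *   GENMASK_ULL(39, 21) == 0x000000ffffe00000ULL  (the example above)
 */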
 
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
185,6 → 196,21
 
#ifdef __KERNEL__
 
#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits) \
({ \
const typeof(*ptr) mask = (_mask), bits = (_bits); \
typeof(*ptr) old, new; \
\
do { \
old = ACCESS_ONCE(*ptr); \
new = (old & ~mask) | bits; \
} while (cmpxchg(ptr, old, new) != old); \
\
new; \
})
#endif
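
set_mask_bits() is a lock-free read-modify-write: it snapshots *ptr, builds the new value outside the mask, and retries until cmpxchg() confirms no other CPU changed the word in between. A usage sketch with hypothetical mask/value names:

#define STATE_MASK	0x0000000fUL	/* hypothetical 4-bit field */
#define STATE_RUNNING	0x00000002UL

static void set_state_running(unsigned long *flags)
{
	/* Atomically replace the field; the other bits are preserved. */
	set_mask_bits(flags, STATE_MASK, STATE_RUNNING);
}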
 
#ifndef find_last_bit
/**
* find_last_bit - find the last set bit in a memory region
/drivers/include/linux/bug.h
57,6 → 57,7
 
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
 
/* Force a compilation error if a constant expression is not a power of 2 */
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
 
/drivers/include/linux/byteorder/generic.h
2,7 → 2,7
#define _LINUX_BYTEORDER_GENERIC_H
 
/*
* linux/byteorder_generic.h
* linux/byteorder/generic.h
* Generic Byte-reordering support
*
* The "... p" macros, like le64_to_cpup, can be used with pointers
/drivers/include/linux/compiler-gcc.h
37,6 → 37,9
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
(typeof(ptr)) (__ptr + (off)); })
 
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
 
#ifdef __CHECKER__
#define __must_be_array(arr) 0
#else
50,9 → 53,14
*/
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
# define inline inline __attribute__((always_inline))
# define __inline__ __inline__ __attribute__((always_inline))
# define __inline __inline __attribute__((always_inline))
# define inline inline __attribute__((always_inline)) notrace
# define __inline__ __inline__ __attribute__((always_inline)) notrace
# define __inline __inline __attribute__((always_inline)) notrace
#else
/* A lot of inline functions can cause havoc with function tracing */
# define inline inline notrace
# define __inline__ __inline__ notrace
# define __inline __inline notrace
#endif
 
#define __deprecated __attribute__((deprecated))
/drivers/include/linux/compiler-gcc4.h
75,11 → 75,7
*
* (asm goto is automatically volatile - the naming reflects this.)
*/
#if GCC_VERSION <= 40801
# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
#else
# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
#endif
 
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if GCC_VERSION >= 40400
/drivers/include/linux/compiler.h
63,6 → 63,13
# include <linux/compiler-intel.h>
#endif
 
/* Clang compiler defines __GNUC__. So we will overwrite implementations
* coming from above header files here
*/
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif
 
/*
* Generic compiler-dependent macros required for kernel
* build go below this comment. Actual compiler/compiler version
170,6 → 177,10
(typeof(ptr)) (__ptr + (off)); })
#endif
 
#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif
 
/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
298,6 → 309,11
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
 
/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
 
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
307,9 → 323,18
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
* Sparse complains of variable sized arrays due to the temporary variable in
* __compiletime_assert. Unfortunately we can't just expand it out to make
* sparse see a constant array size without breaking compiletime_assert on old
* versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
*/
# ifndef __CHECKER__
# define __compiletime_error_fallback(condition) \
do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
#else
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif
 
337,6 → 362,10
#define compiletime_assert(condition, msg) \
_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
 
#define compiletime_assert_atomic_type(t) \
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")
 
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
354,7 → 383,9
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes __attribute__((__section__(".kprobes.text")))
# define nokprobe_inline __always_inline
#else
# define __kprobes
# define nokprobe_inline inline
#endif
#endif /* __LINUX_COMPILER_H */
/drivers/include/linux/dma-buf.h
115,6 → 115,7
* @exp_name: name of the exporter; useful for debugging.
* @list_node: node for dma_buf accounting and debugging.
* @priv: exporter specific private data for this buffer object.
* @resv: reservation object linked to this dma-buf
*/
struct dma_buf {
size_t size;
168,10 → 169,11
struct dma_buf_attachment *dmabuf_attach);
 
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
size_t size, int flags, const char *);
size_t size, int flags, const char *,
struct reservation_object *);
 
#define dma_buf_export(priv, ops, size, flags) \
dma_buf_export_named(priv, ops, size, flags, __FILE__)
#define dma_buf_export(priv, ops, size, flags, resv) \
dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv)
 
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
194,4 → 196,6
unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
int dma_buf_debugfs_create_file(const char *name,
int (*write)(struct seq_file *));
#endif /* __DMA_BUF_H__ */
/drivers/include/linux/err.h
2,12 → 2,13
#define _LINUX_ERR_H
 
#include <linux/compiler.h>
#include <linux/types.h>
 
#include <errno.h>
 
/*
* Kernel pointers have redundant information, so we can use a
* scheme where we can return either an error code or a dentry
* scheme where we can return either an error code or a normal
* pointer with the same return value.
*
* This should be a per-architecture thing, to allow different
29,12 → 30,12
return (long) ptr;
}
 
static inline long __must_check IS_ERR(__force const void *ptr)
static inline bool __must_check IS_ERR(__force const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
 
static inline long __must_check IS_ERR_OR_NULL(__force const void *ptr)
static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
{
return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}
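
The scheme works because the last page of the kernel address space never holds valid pointers, so small negative errno values can be cast to a pointer and back. A sketch of the resulting calling convention (the mydrv_* names are hypothetical):

struct widget { int id; };

static struct widget *mydrv_alloc(void)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return ERR_PTR(-ENOMEM);	/* encode errno as a pointer */
	return w;
}

static int mydrv_probe(void)
{
	struct widget *w = mydrv_alloc();

	if (IS_ERR(w))
		return PTR_ERR(w);		/* decode it back to -ENOMEM */
	/* ... use w ... */
	return 0;
}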
/drivers/include/linux/fb.h
413,6 → 413,8
struct fb_info;
struct device;
struct file;
struct videomode;
struct device_node;
 
/* Definitions below are used in the parsed monitor specs */
#define FB_DPMS_ACTIVE_OFF 1
439,6 → 441,7
 
#define FB_MISC_PRIM_COLOR 1
#define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */
#define FB_MISC_HDMI 4
struct fb_chroma {
__u32 redx; /* in fraction of 1024 */
__u32 greenx;
690,6 → 693,10
 
/* teardown any resources to do with this framebuffer */
void (*fb_destroy)(struct fb_info *info);
 
/* called at KDB enter and leave time to prepare the console */
int (*fb_debug_enter)(struct fb_info *info);
int (*fb_debug_leave)(struct fb_info *info);
};
 
#ifdef CONFIG_FB_TILEBLITTING
938,7 → 945,7
#define fb_memcpy_fromfb sbus_memcpy_fromio
#define fb_memcpy_tofb sbus_memcpy_toio
 
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__)
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__)
 
#define fb_readb __raw_readb
#define fb_readw __raw_readw
999,7 → 1006,7
extern int register_framebuffer(struct fb_info *fb_info);
extern int unregister_framebuffer(struct fb_info *fb_info);
extern int unlink_framebuffer(struct fb_info *fb_info);
extern void remove_conflicting_framebuffers(struct apertures_struct *a,
extern int remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
extern int fb_show_logo(struct fb_info *fb_info, int rotate);
1027,7 → 1034,7
static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
u8 *src, u32 s_pitch, u32 height)
{
int i, j;
u32 i, j;
 
d_pitch -= s_pitch;
 
/drivers/include/linux/file.h
4,4 → 4,5
 
#ifndef __LINUX_FILE_H
#define __LINUX_FILE_H
struct file;
#endif /* __LINUX_FILE_H */
/drivers/include/linux/firmware.h
43,9 → 43,11
int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
int request_firmware_nowait(
struct module *module, int uevent,
const char *name, struct device *device, void *context,
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
void (*cont)(const struct firmware *fw, void *context));
int request_firmware_direct(const struct firmware **fw, const char *name,
struct device *device);
 
void release_firmware(const struct firmware *fw);
 
/drivers/include/linux/hashtable.h
0,0 → 1,205
/*
* Statically sized hash table implementation
* (C) 2012 Sasha Levin <levinsasha928@gmail.com>
*/
 
#ifndef _LINUX_HASHTABLE_H
#define _LINUX_HASHTABLE_H
 
#include <linux/list.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/rculist.h>
 
#define DEFINE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)] = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
 
#define DECLARE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)]
 
#define HASH_SIZE(name) (ARRAY_SIZE(name))
#define HASH_BITS(name) ilog2(HASH_SIZE(name))
 
/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
#define hash_min(val, bits) \
(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
 
static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
 
for (i = 0; i < sz; i++)
INIT_HLIST_HEAD(&ht[i]);
}
 
/**
* hash_init - initialize a hash table
* @hashtable: hashtable to be initialized
*
* Calculates the size of the hashtable from the given parameter, otherwise
* same as hash_init_size.
*
* This has to be a macro since HASH_BITS() will not work on pointers since
* it calculates the size during preprocessing.
*/
#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
 
/**
* hash_add - add an object to a hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add(hashtable, node, key) \
hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
 
/**
* hash_add_rcu - add an object to a rcu enabled hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add_rcu(hashtable, node, key) \
hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
 
/**
* hash_hashed - check whether an object is in any hashtable
* @node: the &struct hlist_node of the object to be checked
*/
static inline bool hash_hashed(struct hlist_node *node)
{
return !hlist_unhashed(node);
}
 
static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
 
for (i = 0; i < sz; i++)
if (!hlist_empty(&ht[i]))
return false;
 
return true;
}
 
/**
* hash_empty - check whether a hashtable is empty
* @hashtable: hashtable to check
*
* This has to be a macro since HASH_BITS() will not work on pointers since
* it calculates the size during preprocessing.
*/
#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
 
/**
* hash_del - remove an object from a hashtable
* @node: &struct hlist_node of the object to remove
*/
static inline void hash_del(struct hlist_node *node)
{
hlist_del_init(node);
}
 
/**
* hash_del_rcu - remove an object from a rcu enabled hashtable
* @node: &struct hlist_node of the object to remove
*/
static inline void hash_del_rcu(struct hlist_node *node)
{
hlist_del_init_rcu(node);
}
 
/**
* hash_for_each - iterate over a hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each(name, bkt, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry(obj, &name[bkt], member)
 
/**
* hash_for_each_rcu - iterate over a rcu enabled hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_rcu(name, bkt, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry_rcu(obj, &name[bkt], member)
 
/**
* hash_for_each_safe - iterate over a hashtable safe against removal of
* hash entry
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @tmp: a &struct used for temporary storage
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_safe(name, bkt, tmp, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
 
/**
* hash_for_each_possible - iterate over all possible objects hashing to the
* same bucket
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible(name, obj, member, key) \
hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
 
/**
* hash_for_each_possible_rcu - iterate over all possible objects hashing to the
* same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_rcu(name, obj, member, key) \
hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
member)
 
/**
* hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
* to the same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*
* This is the same as hash_for_each_possible_rcu() except that it does
* not do any RCU debugging or tracing.
*/
#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \
hlist_for_each_entry_rcu_notrace(obj, \
&name[hash_min(key, HASH_BITS(name))], member)
 
/**
* hash_for_each_possible_safe - iterate over all possible objects hashing to the
* same bucket safe against removals
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @tmp: a &struct used for temporary storage
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
hlist_for_each_entry_safe(obj, tmp,\
&name[hash_min(key, HASH_BITS(name))], member)
 
 
#endif
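
A compact sketch of the API this new header provides (struct object and its integer key are hypothetical):

struct object {
	int id;
	struct hlist_node node;
};

static DEFINE_HASHTABLE(objects, 6);		/* 2^6 = 64 buckets */

static void object_insert(struct object *obj)
{
	hash_add(objects, &obj->node, obj->id);
}

static struct object *object_find(int id)
{
	struct object *obj;

	/* Different keys can land in one bucket, so recheck the key. */
	hash_for_each_possible(objects, obj, node, id)
		if (obj->id == id)
			return obj;
	return NULL;
}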
/drivers/include/linux/hdmi.h
262,6 → 262,18
struct hdmi_vendor_infoframe hdmi;
};
 
/**
* union hdmi_infoframe - overall union of all abstract infoframe representations
* @any: generic infoframe
* @avi: avi infoframe
* @spd: spd infoframe
* @vendor: union of all vendor infoframes
* @audio: audio infoframe
*
* This is used by the generic pack function. This works since all infoframes
* have the same header which also indicates which type of infoframe should be
* packed.
*/
union hdmi_infoframe {
struct hdmi_any_infoframe any;
struct hdmi_avi_infoframe avi;
/drivers/include/linux/i2c.h
135,7 → 135,6
* @name: Indicates the type of the device, usually a chip name that's
* generic enough to hide second-sourcing and compatible revisions.
* @adapter: manages the bus segment hosting this I2C device
* @driver: device's driver, hence pointer to access routines
* @dev: Driver model device node for the slave.
* @irq: indicates the IRQ generated by this device (if any)
* @detected: member of an i2c_driver.clients list or i2c-core's
152,7 → 151,6
/* _LOWER_ 7 bits */
char name[I2C_NAME_SIZE];
struct i2c_adapter *adapter; /* the adapter we sit on */
struct i2c_driver *driver; /* and our access routines */
struct device dev; /* the device structure */
int irq; /* irq issued by device */
struct list_head detected;
160,6 → 158,7
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
 
extern struct i2c_client *i2c_verify_client(struct device *dev);
extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
 
/**
* struct i2c_board_info - template for device creation
209,6 → 208,10
* i2c_algorithm is the interface to a class of hardware solutions which can
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
* to name two of the most common.
*
* The return codes from the @master_xfer field should indicate the type of
* error code that occurred during the transfer, as documented in the kernel
* Documentation file Documentation/i2c/fault-codes.
*/
struct i2c_algorithm {
/* If an adapter algorithm can't do I2C-level access, set master_xfer
275,6 → 278,7
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
#define I2C_CLASS_SPD (1<<7) /* Memory modules */
#define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */
 
/* Internal numbers to terminate lists */
#define I2C_CLIENT_END 0xfffeU
/drivers/include/linux/idr.h
35,21 → 35,24
 
struct idr_layer {
int prefix; /* the ID prefix of this idr_layer */
DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */
int layer; /* distance from leaf */
struct idr_layer __rcu *ary[1<<IDR_BITS];
int count; /* When zero, we can release it */
int layer; /* distance from leaf */
union {
/* A zero bit means "space here" */
DECLARE_BITMAP(bitmap, IDR_SIZE);
struct rcu_head rcu_head;
};
};
 
struct idr {
struct idr_layer __rcu *hint; /* the last layer allocated from */
struct idr_layer __rcu *top;
struct idr_layer *id_free;
int layers; /* only valid w/o concurrent changes */
int id_free_cnt;
int cur; /* current pos for cyclic allocation */
spinlock_t lock;
int id_free_cnt;
struct idr_layer *id_free;
};
 
#define IDR_INIT(name) \
88,9 → 91,9
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_free(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
bool idr_is_empty(struct idr *idp);
 
/**
* idr_preload_end - end preload section started with idr_preload()
139,69 → 142,6
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
 
/*
* Don't use the following functions. These exist only to suppress
* deprecated warnings on EXPORT_SYMBOL()s.
*/
int __idr_pre_get(struct idr *idp, gfp_t gfp_mask);
int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
void __idr_remove_all(struct idr *idp);
 
/**
* idr_pre_get - reserve resources for idr allocation
* @idp: idr handle
* @gfp_mask: memory allocation flags
*
* Part of old alloc interface. This is going away. Use
* idr_preload[_end]() and idr_alloc() instead.
*/
static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
return __idr_pre_get(idp, gfp_mask);
}
 
/**
* idr_get_new_above - allocate new idr entry above or equal to a start id
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @starting_id: id to start search at
* @id: pointer to the allocated handle
*
* Part of old alloc interface. This is going away. Use
* idr_preload[_end]() and idr_alloc() instead.
*/
static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr,
int starting_id, int *id)
{
return __idr_get_new_above(idp, ptr, starting_id, id);
}
 
/**
* idr_get_new - allocate new idr entry
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @id: pointer to the allocated handle
*
* Part of old alloc interface. This is going away. Use
* idr_preload[_end]() and idr_alloc() instead.
*/
static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id)
{
return __idr_get_new_above(idp, ptr, 0, id);
}
 
/**
* idr_remove_all - remove all ids from the given idr tree
* @idp: idr handle
*
* If you're trying to destroy @idp, calling idr_destroy() is enough.
* This is going away. Don't use.
*/
static inline void __deprecated idr_remove_all(struct idr *idp)
{
__idr_remove_all(idp);
}
 
/*
* IDA - IDR based id allocator, use when translation from id to
* pointer isn't necessary.
*
/drivers/include/linux/interval_tree.h
0,0 → 1,27
#ifndef _LINUX_INTERVAL_TREE_H
#define _LINUX_INTERVAL_TREE_H
 
#include <linux/rbtree.h>
 
struct interval_tree_node {
struct rb_node rb;
unsigned long start; /* Start of interval */
unsigned long last; /* Last location _in_ interval */
unsigned long __subtree_last;
};
 
extern void
interval_tree_insert(struct interval_tree_node *node, struct rb_root *root);
 
extern void
interval_tree_remove(struct interval_tree_node *node, struct rb_root *root);
 
extern struct interval_tree_node *
interval_tree_iter_first(struct rb_root *root,
unsigned long start, unsigned long last);
 
extern struct interval_tree_node *
interval_tree_iter_next(struct interval_tree_node *node,
unsigned long start, unsigned long last);
 
#endif /* _LINUX_INTERVAL_TREE_H */
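
A short sketch of walking every stored interval that overlaps a query range with the interface above:

static void visit_overlaps(struct rb_root *root,
			   unsigned long start, unsigned long last)
{
	struct interval_tree_node *it;

	/* Each node returned satisfies it->start <= last && start <= it->last. */
	for (it = interval_tree_iter_first(root, start, last); it;
	     it = interval_tree_iter_next(it, start, last)) {
		/* ... process [it->start, it->last] ... */
	}
}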
/drivers/include/linux/interval_tree_generic.h
0,0 → 1,191
/*
Interval Trees
(C) 2012 Michel Lespinasse <walken@google.com>
 
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
 
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
include/linux/interval_tree_generic.h
*/
 
#include <linux/rbtree_augmented.h>
 
/*
* Template for implementing interval trees
*
* ITSTRUCT: struct type of the interval tree nodes
* ITRB: name of struct rb_node field within ITSTRUCT
* ITTYPE: type of the interval endpoints
* ITSUBTREE: name of ITTYPE field within ITSTRUCT holding last-in-subtree
* ITSTART(n): start endpoint of ITSTRUCT node n
* ITLAST(n): last endpoint of ITSTRUCT node n
* ITSTATIC: 'static' or empty
* ITPREFIX: prefix to use for the inline tree definitions
*
* Note - before using this, please consider if non-generic version
* (interval_tree.h) would work for you...
*/
 
#define INTERVAL_TREE_DEFINE(ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, \
ITSTART, ITLAST, ITSTATIC, ITPREFIX) \
\
/* Callbacks for augmented rbtree insert and remove */ \
\
static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \
{ \
ITTYPE max = ITLAST(node), subtree_last; \
if (node->ITRB.rb_left) { \
subtree_last = rb_entry(node->ITRB.rb_left, \
ITSTRUCT, ITRB)->ITSUBTREE; \
if (max < subtree_last) \
max = subtree_last; \
} \
if (node->ITRB.rb_right) { \
subtree_last = rb_entry(node->ITRB.rb_right, \
ITSTRUCT, ITRB)->ITSUBTREE; \
if (max < subtree_last) \
max = subtree_last; \
} \
return max; \
} \
\
RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \
ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \
\
/* Insert / remove interval nodes from the tree */ \
\
ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \
{ \
struct rb_node **link = &root->rb_node, *rb_parent = NULL; \
ITTYPE start = ITSTART(node), last = ITLAST(node); \
ITSTRUCT *parent; \
\
while (*link) { \
rb_parent = *link; \
parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \
if (parent->ITSUBTREE < last) \
parent->ITSUBTREE = last; \
if (start < ITSTART(parent)) \
link = &parent->ITRB.rb_left; \
else \
link = &parent->ITRB.rb_right; \
} \
\
node->ITSUBTREE = last; \
rb_link_node(&node->ITRB, rb_parent, link); \
rb_insert_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
} \
\
ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \
{ \
rb_erase_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \
} \
\
/* \
* Iterate over intervals intersecting [start;last] \
* \
* Note that a node's interval intersects [start;last] iff: \
* Cond1: ITSTART(node) <= last \
* and \
* Cond2: start <= ITLAST(node) \
*/ \
\
static ITSTRUCT * \
ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
{ \
while (true) { \
/* \
* Loop invariant: start <= node->ITSUBTREE \
* (Cond2 is satisfied by one of the subtree nodes) \
*/ \
if (node->ITRB.rb_left) { \
ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \
ITSTRUCT, ITRB); \
if (start <= left->ITSUBTREE) { \
/* \
* Some nodes in left subtree satisfy Cond2. \
* Iterate to find the leftmost such node N. \
* If it also satisfies Cond1, that's the \
* match we are looking for. Otherwise, there \
* is no matching interval as nodes to the \
* right of N can't satisfy Cond1 either. \
*/ \
node = left; \
continue; \
} \
} \
if (ITSTART(node) <= last) { /* Cond1 */ \
if (start <= ITLAST(node)) /* Cond2 */ \
return node; /* node is leftmost match */ \
if (node->ITRB.rb_right) { \
node = rb_entry(node->ITRB.rb_right, \
ITSTRUCT, ITRB); \
if (start <= node->ITSUBTREE) \
continue; \
} \
} \
return NULL; /* No match */ \
} \
} \
\
ITSTATIC ITSTRUCT * \
ITPREFIX ## _iter_first(struct rb_root *root, ITTYPE start, ITTYPE last) \
{ \
ITSTRUCT *node; \
\
if (!root->rb_node) \
return NULL; \
node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \
if (node->ITSUBTREE < start) \
return NULL; \
return ITPREFIX ## _subtree_search(node, start, last); \
} \
\
ITSTATIC ITSTRUCT * \
ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
{ \
struct rb_node *rb = node->ITRB.rb_right, *prev; \
\
while (true) { \
/* \
* Loop invariants: \
* Cond1: ITSTART(node) <= last \
* rb == node->ITRB.rb_right \
* \
* First, search right subtree if suitable \
*/ \
if (rb) { \
ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
if (start <= right->ITSUBTREE) \
return ITPREFIX ## _subtree_search(right, \
start, last); \
} \
\
/* Move up the tree until we come from a node's left child */ \
do { \
rb = rb_parent(&node->ITRB); \
if (!rb) \
return NULL; \
prev = &node->ITRB; \
node = rb_entry(rb, ITSTRUCT, ITRB); \
rb = node->ITRB.rb_right; \
} while (prev == rb); \
\
/* Check if the node intersects [start;last] */ \
if (last < ITSTART(node)) /* !Cond1 */ \
return NULL; \
else if (start <= ITLAST(node)) /* Cond2 */ \
return node; \
} \
}
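
The fixed-type interval_tree.h earlier in this commit is itself one instantiation of this template; a sketch mirroring how lib/interval_tree.c expands it:

#define START(node) ((node)->start)
#define LAST(node)  ((node)->last)

/* Emits interval_tree_insert/_remove/_iter_first/_iter_next with
 * external linkage (empty ITSTATIC), caching the last endpoint of
 * each subtree in __subtree_last. */
INTERVAL_TREE_DEFINE(struct interval_tree_node, rb,
		     unsigned long, __subtree_last,
		     START, LAST, /* ITSTATIC: */, interval_tree)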
/drivers/include/linux/ioport.h
57,7 → 57,7
 
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
#define IORESOURCE_DISABLED 0x10000000
#define IORESOURCE_UNSET 0x20000000
#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
#define IORESOURCE_AUTO 0x40000000
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
 
/drivers/include/linux/irqreturn.h
14,6 → 14,6
};
 
typedef enum irqreturn irqreturn_t;
#define IRQ_RETVAL(x) ((x) != IRQ_NONE)
#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE)
 
#endif
/drivers/include/linux/jiffies.h
76,10 → 76,18
* The 64-bit value is not atomic - you MUST NOT read it
* without sampling the sequence number in jiffies_lock.
* get_jiffies_64() will do this for you as appropriate.
*/
extern u64 jiffies_64;
extern unsigned long volatile jiffies;
 
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
#else
static inline u64 get_jiffies_64(void)
{
return (u64)GetTimerTicks();
return (u64)jiffies;
}
#endif
 
/*
* These inlines deal with timer wrapping correctly. You are
290,6 → 298,12
*/
extern unsigned int jiffies_to_msecs(const unsigned long j);
extern unsigned int jiffies_to_usecs(const unsigned long j);
 
static inline u64 jiffies_to_nsecs(const unsigned long j)
{
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
}
 
extern unsigned long msecs_to_jiffies(const unsigned int m);
extern unsigned long usecs_to_jiffies(const unsigned int u);
extern unsigned long timespec_to_jiffies(const struct timespec *value);
/drivers/include/linux/kernel.h
31,6 → 31,19
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)
 
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX>>1))
#define S8_MIN ((s8)(-S8_MAX - 1))
#define U16_MAX ((u16)~0U)
#define S16_MAX ((s16)(U16_MAX>>1))
#define S16_MIN ((s16)(-S16_MAX - 1))
#define U32_MAX ((u32)~0U)
#define S32_MAX ((s32)(U32_MAX>>1))
#define S32_MIN ((s32)(-S32_MAX - 1))
#define U64_MAX ((u64)~0ULL)
#define S64_MAX ((s64)(U64_MAX>>1))
#define S64_MIN ((s64)(-S64_MAX - 1))
 
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
126,6 → 139,13
*/
#define lower_32_bits(n) ((u32)(n))
 
 
 
#define abs64(x) ({ \
s64 __x = (x); \
(__x < 0) ? -__x : __x; \
})
 
#define KERN_EMERG "<0>" /* system is unusable */
#define KERN_ALERT "<1>" /* action must be taken immediately */
#define KERN_CRIT "<2>" /* critical conditions */
159,6 → 179,9
 
#define printk(fmt, arg...) dbgprintf(fmt , ##arg)
 
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
extern __printf(2, 3)
char *kasprintf(gfp_t gfp, const char *fmt, ...);
 
/*
* min()/max()/clamp() macros that also do
493,5 → 516,35
})
 
 
static inline __must_check long __copy_to_user(void __user *to,
const void *from, unsigned long n)
{
if (__builtin_constant_p(n)) {
switch(n) {
case 1:
*(u8 __force *)to = *(u8 *)from;
return 0;
case 2:
*(u16 __force *)to = *(u16 *)from;
return 0;
case 4:
*(u32 __force *)to = *(u32 *)from;
return 0;
#ifdef CONFIG_64BIT
case 8:
*(u64 __force *)to = *(u64 *)from;
return 0;
#endif
default:
break;
}
}
 
memcpy((void __force *)to, from, n);
return 0;
}
 
struct seq_file;
 
#endif
 
/drivers/include/linux/kgdb.h
0,0 → 1,24
#ifndef _KDB_H
#define _KDB_H
 
/*
* Kernel Debugger Architecture Independent Global Headers
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
*/
 
typedef enum {
KDB_REPEAT_NONE = 0, /* Do not repeat this command */
KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */
KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */
} kdb_repeat_t;
 
typedef int (*kdb_func_t)(int, const char **);
 
#endif /* !_KDB_H */
/drivers/include/linux/kobject.h
31,8 → 31,10
#define UEVENT_NUM_ENVP 32 /* number of env pointers */
#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */
 
#ifdef CONFIG_UEVENT_HELPER
/* path to the userspace helper executed on an event */
extern char uevent_helper[];
#endif
 
/* counter to tag the uevent, read only except for the kobject core */
extern u64 uevent_seqnum;
65,6 → 67,9
struct kobj_type *ktype;
// struct sysfs_dirent *sd;
struct kref kref;
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
struct delayed_work release;
#endif
unsigned int state_initialized:1;
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;
103,6 → 108,7
extern struct kobject *kobject_get(struct kobject *kobj);
extern void kobject_put(struct kobject *kobj);
 
extern const void *kobject_namespace(struct kobject *kobj);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
 
struct kobj_type {
/drivers/include/linux/list.h
361,6 → 361,17
list_entry((ptr)->next, type, member)
 
/**
* list_last_entry - get the last element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_struct within the struct.
*
* Note that the list is expected to be non-empty.
*/
#define list_last_entry(ptr, type, member) \
list_entry((ptr)->prev, type, member)
 
/**
* list_first_entry_or_null - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
372,6 → 383,22
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
 
/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
* @member: the name of the list_struct within the struct.
*/
#define list_next_entry(pos, member) \
list_entry((pos)->member.next, typeof(*(pos)), member)
 
/**
* list_prev_entry - get the prev element in list
* @pos: the type * to cursor
* @member: the name of the list_struct within the struct.
*/
#define list_prev_entry(pos, member) \
list_entry((pos)->member.prev, typeof(*(pos)), member)
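
The two new helpers exist mainly so the iteration macros below can drop the repeated list_entry()/typeof boilerplate; open-coded, a cursor step is just this (struct item is hypothetical):

struct item {
	int val;
	struct list_head list;
};

static struct item *item_after(struct item *pos)
{
	/* container_of(pos->list.next, struct item, list) */
	return list_next_entry(pos, list);
}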
 
/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
415,9 → 442,9
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
for (pos = list_first_entry(head, typeof(*pos), member); \
&pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
pos = list_next_entry(pos, member))
 
/**
* list_for_each_entry_reverse - iterate backwards over list of given type.
426,9 → 453,9
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member); \
for (pos = list_last_entry(head, typeof(*pos), member); \
&pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member))
pos = list_prev_entry(pos, member))
 
/**
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
451,9 → 478,9
* the current position.
*/
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member); \
for (pos = list_next_entry(pos, member); \
&pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
pos = list_next_entry(pos, member))
 
/**
* list_for_each_entry_continue_reverse - iterate backwards from the given point
465,9 → 492,9
* the current position.
*/
#define list_for_each_entry_continue_reverse(pos, head, member) \
for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
for (pos = list_prev_entry(pos, member); \
&pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member))
pos = list_prev_entry(pos, member))
 
/**
* list_for_each_entry_from - iterate over list of given type from the current point
479,7 → 506,7
*/
#define list_for_each_entry_from(pos, head, member) \
for (; &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
pos = list_next_entry(pos, member))
 
/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
489,10 → 516,10
* @member: the name of the list_struct within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
for (pos = list_first_entry(head, typeof(*pos), member), \
n = list_next_entry(pos, member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
pos = n, n = list_next_entry(n, member))
 
/**
* list_for_each_entry_safe_continue - continue list iteration safe against removal
505,10 → 532,10
* safe against removal of list entry.
*/
#define list_for_each_entry_safe_continue(pos, n, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
for (pos = list_next_entry(pos, member), \
n = list_next_entry(pos, member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
pos = n, n = list_next_entry(n, member))
 
/**
* list_for_each_entry_safe_from - iterate over list from current point safe against removal
521,9 → 548,9
* removal of list entry.
*/
#define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_entry(pos->member.next, typeof(*pos), member); \
for (n = list_next_entry(pos, member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
pos = n, n = list_next_entry(n, member))
 
/**
* list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
536,10 → 563,10
* of list entry.
*/
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member), \
n = list_entry(pos->member.prev, typeof(*pos), member); \
for (pos = list_last_entry(head, typeof(*pos), member), \
n = list_prev_entry(pos, member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.prev, typeof(*n), member))
pos = n, n = list_prev_entry(n, member))
 
/**
* list_safe_reset_next - reset a stale list_for_each_entry_safe loop
554,7 → 581,7
* completing the current iteration of the loop body.
*/
#define list_safe_reset_next(pos, n, member) \
n = list_entry(pos->member.next, typeof(*pos), member)
n = list_next_entry(pos, member)
 
/*
* Double linked lists with a single pointer list head.
626,15 → 653,15
*(n->pprev) = n;
}
 
static inline void hlist_add_after(struct hlist_node *n,
struct hlist_node *next)
static inline void hlist_add_behind(struct hlist_node *n,
struct hlist_node *prev)
{
next->next = n->next;
n->next = next;
next->pprev = &n->next;
n->next = prev->next;
prev->next = n;
n->pprev = &prev->next;
 
if(next->next)
next->next->pprev = &next->next;
if (n->next)
n->next->pprev = &n->next;
}
 
/* after that we'll appear to be on some hlist and hlist_del will work */
/drivers/include/linux/lockdep.h
228,9 → 228,9
unsigned int trylock:1; /* 16 bits */
 
unsigned int read:2; /* see lock_acquire() comment */
unsigned int check:2; /* see lock_acquire() comment */
unsigned int check:1; /* see lock_acquire() comment */
unsigned int hardirqs_off:1;
unsigned int references:11; /* 32 bits */
unsigned int references:12; /* 32 bits */
};
 
/*
241,7 → 241,7
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);
extern asmlinkage void lockdep_sys_exit(void);
 
extern void lockdep_off(void);
extern void lockdep_on(void);
279,7 → 279,7
(lock)->dep_map.key, sub)
 
#define lockdep_set_novalidate_class(lock) \
lockdep_set_class(lock, &__lockdep_no_validate__)
lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
/*
* Compare locking classes
*/
302,9 → 302,8
*
* Values for check:
*
* 0: disabled
* 1: simple checks (freeing, held-at-exit-time, etc.)
* 2: full validation
* 0: simple checks (freeing, held-at-exit-time, etc.)
* 1: full validation
*/
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
335,10 → 334,14
 
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
 
#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l))
#define lockdep_assert_held(l) do { \
WARN_ON(debug_locks && !lockdep_is_held(l)); \
} while (0)
 
#else /* !LOCKDEP */
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
 
#else /* !CONFIG_LOCKDEP */
 
static inline void lockdep_off(void)
{
}
384,7 → 387,7
 
#define lockdep_depth(tsk) (0)
 
#define lockdep_assert_held(l) do { } while (0)
#define lockdep_assert_held(l) do { (void)(l); } while (0)
 
#define lockdep_recursing(tsk) (0)
 
532,13 → 535,13
# define might_lock(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
# define might_lock_read(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
} while (0)
#else
/drivers/include/linux/math64.h
133,4 → 133,34
return ret;
}
 
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
 
#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */
 
#else
 
#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
u32 ah, al;
u64 ret;
 
al = a;
ah = a >> 32;
 
ret = ((u64)al * mul) >> shift;
if (ah)
ret += ((u64)ah * mul) << (32 - shift);
 
return ret;
}
#endif /* mul_u64_u32_shr */
 
#endif
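
The fallback is exact for shift <= 32: splitting a into 32-bit halves makes the high partial product a multiple of 2^shift, so shifting the parts separately loses nothing. A worked check:

/*
 * a = 0x100000002, mul = 5, shift = 1  ->  al = 2, ah = 1
 *   ((u64)al * mul) >> 1  = 10 >> 1 = 0x5
 *   ((u64)ah * mul) << 31 = 5 << 31 = 0x280000000
 *   sum                             = 0x280000005
 * direct: (0x100000002 * 5) >> 1 = 0x50000000A >> 1 = 0x280000005
 */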
 
#endif /* _LINUX_MATH64_H */
/drivers/include/linux/mod_devicetable.h
431,6 → 431,14
kernel_ulong_t driver_data; /* Data private to the driver */
};
 
#define SPMI_NAME_SIZE 32
#define SPMI_MODULE_PREFIX "spmi:"
 
struct spmi_device_id {
char name[SPMI_NAME_SIZE];
kernel_ulong_t driver_data; /* Data private to the driver */
};
 
/* dmi */
enum dmi_field {
DMI_NONE,
547,6 → 555,11
* See documentation of "x86_match_cpu" for details.
*/
 
/*
* MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id.
* Although gcc seems to ignore this error, clang fails without this define.
*/
#define x86cpu_device_id x86_cpu_id
struct x86_cpu_id {
__u16 vendor;
__u16 family;
563,6 → 576,15
#define X86_MODEL_ANY 0
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */
 
/*
* Generic table type for matching CPU features.
* @feature: the bit number of the feature (0 - 65535)
*/
 
struct cpu_feature {
__u16 feature;
};
 
#define IPACK_ANY_FORMAT 0xff
#define IPACK_ANY_ID (~0)
struct ipack_device_id {
598,4 → 620,9
__u16 asm_did, asm_vid;
};
 
struct mcb_device_id {
__u16 device;
kernel_ulong_t driver_data;
};
 
#endif /* LINUX_MOD_DEVICETABLE_H */
/drivers/include/linux/mutex.h
92,4 → 92,16
return atomic_read(&lock->count) != 1;
}
 
static inline int mutex_trylock(struct mutex *lock)
{
if (likely(atomic_cmpxchg(&lock->count, 1, 0) == 1))
return 1;
return 0;
}
 
static inline void mutex_destroy(struct mutex *lock)
{
 
}
 
#endif
/drivers/include/linux/pci.h
456,6 → 456,7
(pci_resource_end((dev), (bar)) - \
pci_resource_start((dev), (bar)) + 1))
 
#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
 
struct pci_bus {
struct list_head node; /* node in list of buses */
480,7 → 481,7
char name[48];
 
unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */
pci_bus_flags_t bus_flags; /* Inherited by child busses */
pci_bus_flags_t bus_flags; /* inherited by child buses */
struct device *bridge;
struct device dev;
struct bin_attribute *legacy_io; /* legacy I/O for this bus */
508,8 → 509,12
#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
 
/*
* Returns true if the pci bus is root (behind host-pci bridge),
* Returns true if the PCI bus is root (behind host-PCI bridge),
* false otherwise
*
* Some code assumes that "bus->self == NULL" means that bus is a root bus.
* This is incorrect because "virtual" buses added for SR-IOV (via
* virtfn_add_bus()) have "bus->self == NULL" but are not root buses.
*/
static inline bool pci_is_root_bus(struct pci_bus *pbus)
{
531,6 → 536,32
#define PCIBIOS_SET_FAILED 0x88
#define PCIBIOS_BUFFER_TOO_SMALL 0x89
 
/*
* Translate above to generic errno for passing back through non-PCI code.
*/
static inline int pcibios_err_to_errno(int err)
{
if (err <= PCIBIOS_SUCCESSFUL)
return err; /* Assume already errno */
 
switch (err) {
case PCIBIOS_FUNC_NOT_SUPPORTED:
return -ENOENT;
case PCIBIOS_BAD_VENDOR_ID:
return -EINVAL;
case PCIBIOS_DEVICE_NOT_FOUND:
return -ENODEV;
case PCIBIOS_BAD_REGISTER_NUMBER:
return -EFAULT;
case PCIBIOS_SET_FAILED:
return -EIO;
case PCIBIOS_BUFFER_TOO_SMALL:
return -ENOSPC;
}
 
return -ENOTTY;
}
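
A sketch of the intended call site, propagating a PCIBIOS_* status out as a normal errno (mydrv_read_vendor() is hypothetical):

static int mydrv_read_vendor(struct pci_dev *pdev, u16 *vendor)
{
	int err = pci_read_config_word(pdev, PCI_VENDOR_ID, vendor);

	/* 0 passes through; the 0x8x status codes become -ENODEV etc. */
	return pcibios_err_to_errno(err);
}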
 
/* Low-level architecture-dependent routines */
 
struct pci_ops {
586,7 → 617,7
* pci_is_pcie - check if the PCI device is PCI Express capable
* @dev: PCI device
*
* Retrun true if the PCI device is PCI Express capable, false otherwise.
* Returns: true if the PCI device is PCI Express capable, false otherwise.
*/
static inline bool pci_is_pcie(struct pci_dev *dev)
{
672,6 → 703,11
 
#define pci_name(x) "radeon"
 
static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
return pdev->resource[bar].start;
}
 
#endif //__PCI__H__
 
 
/drivers/include/linux/rbtree.h
85,6 → 85,11
*rb_link = node;
}
 
#define rb_entry_safe(ptr, type, member) \
({ typeof(ptr) ____ptr = (ptr); \
____ptr ? rb_entry(____ptr, type, member) : NULL; \
})
 
/**
* rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of
* given type safe against removal of rb_node entry
95,12 → 100,9
* @field: the name of the rb_node field within 'type'.
*/
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
for (pos = rb_entry(rb_first_postorder(root), typeof(*pos), field),\
n = rb_entry(rb_next_postorder(&pos->field), \
typeof(*pos), field); \
&pos->field; \
pos = n, \
n = rb_entry(rb_next_postorder(&pos->field), \
typeof(*pos), field))
for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
typeof(*pos), field); 1; }); \
pos = n)
 
#endif /* _LINUX_RBTREE_H */
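
Postorder visits children before their parent, so it is the one traversal during which every node may be freed as it is visited; a teardown sketch (struct thing is hypothetical):

struct thing {
	struct rb_node node;
};

static void things_destroy(struct rb_root *root)
{
	struct thing *pos, *n;

	rbtree_postorder_for_each_entry_safe(pos, n, root, node)
		kfree(pos);	/* subtree already visited; no rebalancing */
	*root = RB_ROOT;
}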
/drivers/include/linux/rculist.h
40,7 → 40,7
next->prev = new;
}
#else
extern void __list_add_rcu(struct list_head *new,
void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next);
#endif
 
191,7 → 191,11
if (list_empty(list))
return;
 
/* "first" and "last" tracking list, so initialize it. */
/*
* "first" and "last" tracking list, so initialize it. RCU readers
* have access to this list, so we must use INIT_LIST_HEAD_RCU()
* instead of INIT_LIST_HEAD().
*/
 
INIT_LIST_HEAD(list);
 
228,7 → 232,8
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_entry_rcu(ptr, type, member) \
({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \
({ \
typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \
})
 
266,10 → 271,10
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
#define list_first_or_null_rcu(ptr, type, member) \
({struct list_head *__ptr = (ptr); \
({ \
struct list_head *__ptr = (ptr); \
struct list_head *__next = ACCESS_ONCE(__ptr->next); \
likely(__ptr != __next) ? \
list_entry_rcu(__next, type, member) : NULL; \
likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
 
/**
412,9 → 417,9
}
 
/**
* hlist_add_after_rcu
* hlist_add_behind_rcu
* @n: the new element to add to the hash list.
* @prev: the existing element to add the new element after.
* @n: the new element to add to the hash list.
*
* Description:
* Adds the specified element to the specified hlist
429,8 → 434,8
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs.
*/
static inline void hlist_add_after_rcu(struct hlist_node *prev,
struct hlist_node *n)
static inline void hlist_add_behind_rcu(struct hlist_node *n,
struct hlist_node *prev)
{
n->next = prev->next;
n->pprev = &prev->next;
/drivers/include/linux/reservation.h
0,0 → 1,62
/*
* Header file for reservations for dma-buf and ttm
*
* Copyright(C) 2011 Linaro Limited. All rights reserved.
* Copyright (C) 2012-2013 Canonical Ltd
* Copyright (C) 2012 Texas Instruments
*
* Authors:
* Rob Clark <robdclark@gmail.com>
* Maarten Lankhorst <maarten.lankhorst@canonical.com>
* Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*
* Based on bo.c which bears the following copyright notice,
* but is dual licensed:
*
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H
 
#include <linux/ww_mutex.h>
 
extern struct ww_class reservation_ww_class;
 
struct reservation_object {
struct ww_mutex lock;
};
 
static inline void
reservation_object_init(struct reservation_object *obj)
{
ww_mutex_init(&obj->lock, &reservation_ww_class);
}
 
static inline void
reservation_object_fini(struct reservation_object *obj)
{
ww_mutex_destroy(&obj->lock);
}
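 
/*
 * With a NULL ww_acquire_ctx the wound/wait lock degenerates to a plain
 * mutex; a minimal sketch, assuming "obj" was initialised as above:
 */
static inline void example_reserve(struct reservation_object *obj)
{
	ww_mutex_lock(&obj->lock, NULL);
	/* ... access the buffer guarded by this reservation ... */
	ww_mutex_unlock(&obj->lock);
}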
 
#endif /* _LINUX_RESERVATION_H */
/drivers/include/linux/scatterlist.h
101,19 → 101,6
return (struct page *)((sg)->page_link & ~0x3);
}
 
/**
* sg_set_buf - Set sg entry to point at given data
* @sg: SG entry
* @buf: Data
* @buflen: Data length
*
**/
//static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
// unsigned int buflen)
//{
// sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
//}
 
/*
* Loop over each sg element, following the pointer to a new list if necessary
*/
226,10 → 213,10
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
 
void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *);
void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
sg_alloc_fn *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int,
struct scatterlist *, gfp_t, sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
int sg_alloc_table_from_pages(struct sg_table *sgt,
struct page **pages, unsigned int n_pages,
241,6 → 228,11
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
 
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, off_t skip);
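 
/*
 * The *_pcopy_* variants take a byte count to skip before copying.
 * Hedged sketch, assuming an sg_table "table" populated elsewhere and
 * a 4 KiB "payload" buffer:
 */
static inline size_t example_inject_payload(struct sg_table *table,
					    void *payload)
{
	/* skip the first 512 bytes of the list, then copy 4096 in;
	 * the return value is the number of bytes actually copied */
	return sg_pcopy_from_buffer(table->sgl, table->nents,
				    payload, 4096, 512);
}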
 
/*
* Maximum number of entries that will be allocated in one piece, if
* a list larger than this is required then chaining will be utilized.
/drivers/include/linux/sched.h
3,6 → 3,8
 
 
#define TASK_UNINTERRUPTIBLE 2
/* Task command name length */
#define TASK_COMM_LEN 16
 
#define schedule_timeout(x) delay(x)
 
/drivers/include/linux/string.h
51,6 → 51,9
#ifndef __HAVE_ARCH_STRCHR
extern char * strchr(const char *,int);
#endif
#ifndef __HAVE_ARCH_STRCHRNUL
extern char * strchrnul(const char *,int);
#endif
#ifndef __HAVE_ARCH_STRNCHR
extern char * strnchr(const char *, size_t, int);
#endif
/drivers/include/linux/types.h
116,11 → 116,12
typedef __u16 uint16_t;
typedef __u32 uint32_t;
 
#if defined(__GNUC__)
typedef __u64 uint64_t;
typedef __u64 u_int64_t;
typedef __s64 int64_t;
#endif
 
typedef __signed__ long long int64_t;
 
/* this is a special 64bit data type that is 8-byte aligned */
#define aligned_u64 __u64 __attribute__((aligned(8)))
#define aligned_be64 __be64 __attribute__((aligned(8)))
150,6 → 151,7
#define pgoff_t unsigned long
#endif
 
/* A dma_addr_t can hold any valid DMA or bus address for the platform */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 dma_addr_t;
#else
200,6 → 202,7
#ifdef __KERNEL__
typedef unsigned __bitwise__ gfp_t;
typedef unsigned __bitwise__ fmode_t;
typedef unsigned __bitwise__ oom_flags_t;
 
#ifdef CONFIG_PHYS_ADDR_T_64BIT
typedef u64 phys_addr_t;
209,6 → 212,12
 
typedef phys_addr_t resource_size_t;
 
/*
* This type is the placeholder for a hardware interrupt number. It has to be
* big enough to enclose whatever representation is used by a given platform.
*/
typedef unsigned long irq_hw_number_t;
 
typedef struct {
int counter;
} atomic_t;
/drivers/include/linux/uapi/drm/drm.h
39,7 → 39,7
#if defined(__KERNEL__) || defined(__linux__)
 
#include <linux/types.h>
#include <asm/ioctl.h>
//#include <asm/ioctl.h>
typedef unsigned int drm_handle_t;
 
#else /* One of the BSDs */
619,6 → 619,17
#define DRM_PRIME_CAP_EXPORT 0x2
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/*
* The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid width x height
* combination for the hardware cursor. The intention is that a hardware
* agnostic userspace can query a cursor plane size to use.
*
* Note that the cross-driver contract is to merely return a valid size;
* drivers are free to attach another meaning on top, eg. i915 returns the
* maximum plane size.
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
#define DRM_CAP_CURSOR_HEIGHT 0x9
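 
/*
 * Hedged userspace sketch: query a cursor size cap through the
 * drm_get_cap argument declared just below ("fd" is an open DRM node;
 * DRM_IOCTL_GET_CAP is defined in the full header).
 */
static inline __u64 example_cursor_width(int fd)
{
	struct drm_get_cap cap = { .capability = DRM_CAP_CURSOR_WIDTH };
 
	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) != 0)
		return 64;	/* a conservative fallback guess */
	return cap.value;	/* a size the cursor plane accepts */
}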
 
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
635,6 → 646,14
*/
#define DRM_CLIENT_CAP_STEREO_3D 1
 
/**
* DRM_CLIENT_CAP_UNIVERSAL_PLANES
*
* If set to 1, the DRM core will expose all planes (overlay, primary, and
* cursor) to userspace.
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
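 
/*
 * Hedged sketch: opt in to universal planes before enumerating plane
 * resources (DRM_IOCTL_SET_CLIENT_CAP and the "value" member come from
 * the full header).
 */
static inline int example_enable_universal_planes(int fd)
{
	struct drm_set_client_cap ccap = {
		.capability = DRM_CLIENT_CAP_UNIVERSAL_PLANES,
		.value = 1,
	};
 
	return ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &ccap);
}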
 
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
761,7 → 780,7
 
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x99.
* The device specific ioctl range is from 0x40 to 0x9f.
* Generic IOCTLS restart at 0xA0.
*
* \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
/drivers/include/linux/uapi/drm/drm_fourcc.h
0,0 → 1,135
/*
* Copyright 2011 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
 
#ifndef DRM_FOURCC_H
#define DRM_FOURCC_H
 
#include <linux/types.h>
 
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
((__u32)(c) << 16) | ((__u32)(d) << 24))
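 
/*
 * fourcc_code() packs four ASCII bytes little-endian, so the tag reads
 * back in memory order; a worked instance (matching DRM_FORMAT_XRGB8888
 * below):
 */
static const __u32 example_xr24 = fourcc_code('X', 'R', '2', '4');
/* == 0x34325258: 'X'=0x58 | 'R'<<8 | '2'<<16 | '4'<<24, bytes "XR24" */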
 
#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
 
/* color index */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
 
/* 8 bpp RGB */
#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
 
/* 16 bpp RGB */
#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
 
#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
 
#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
 
#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
 
#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
 
/* 24 bpp RGB */
#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
 
/* 32 bpp RGB */
#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
 
#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
 
#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
 
#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
 
/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
 
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
 
/*
* 2 plane YCbCr
* index 0 = Y plane, [7:0] Y
* index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
* or
* index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
*/
#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
 
/* special NV12 tiled format */
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
 
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
* index 1: Cb plane, [7:0] Cb
* index 2: Cr plane, [7:0] Cr
* or
* index 1: Cr plane, [7:0] Cr
* index 2: Cb plane, [7:0] Cb
*/
#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
 
#endif /* DRM_FOURCC_H */
/drivers/include/linux/uapi/drm/drm_mode.h
0,0 → 1,520
/*
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
* Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
* Copyright (c) 2008 Red Hat Inc.
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
* Copyright (c) 2007-2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
 
#ifndef _DRM_MODE_H
#define _DRM_MODE_H
 
#include <linux/types.h>
 
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
 
#define DRM_MODE_TYPE_BUILTIN (1<<0)
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_PREFERRED (1<<3)
#define DRM_MODE_TYPE_DEFAULT (1<<4)
#define DRM_MODE_TYPE_USERDEF (1<<5)
#define DRM_MODE_TYPE_DRIVER (1<<6)
 
/* Video mode flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
#define DRM_MODE_FLAG_NVSYNC (1<<3)
#define DRM_MODE_FLAG_INTERLACE (1<<4)
#define DRM_MODE_FLAG_DBLSCAN (1<<5)
#define DRM_MODE_FLAG_CSYNC (1<<6)
#define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
#define DRM_MODE_FLAG_BCAST (1<<10)
#define DRM_MODE_FLAG_PIXMUX (1<<11)
#define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
/*
* When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX
* (define not exposed to user space).
*/
#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
#define DRM_MODE_FLAG_3D_NONE (0<<14)
#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
 
 
/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
#define DRM_MODE_DPMS_STANDBY 1
#define DRM_MODE_DPMS_SUSPEND 2
#define DRM_MODE_DPMS_OFF 3
 
/* Scaling mode options */
#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
software can still scale) */
#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
 
/* Picture aspect ratio options */
#define DRM_MODE_PICTURE_ASPECT_NONE 0
#define DRM_MODE_PICTURE_ASPECT_4_3 1
#define DRM_MODE_PICTURE_ASPECT_16_9 2
 
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
#define DRM_MODE_DITHERING_AUTO 2
 
/* Dirty info options */
#define DRM_MODE_DIRTY_OFF 0
#define DRM_MODE_DIRTY_ON 1
#define DRM_MODE_DIRTY_ANNOTATE 2
 
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
 
__u32 vrefresh;
 
__u32 flags;
__u32 type;
char name[DRM_DISPLAY_MODE_LEN];
};
 
struct drm_mode_card_res {
__u64 fb_id_ptr;
__u64 crtc_id_ptr;
__u64 connector_id_ptr;
__u64 encoder_id_ptr;
__u32 count_fbs;
__u32 count_crtcs;
__u32 count_connectors;
__u32 count_encoders;
__u32 min_width, max_width;
__u32 min_height, max_height;
};
 
struct drm_mode_crtc {
__u64 set_connectors_ptr;
__u32 count_connectors;
 
__u32 crtc_id; /**< Id */
__u32 fb_id; /**< Id of framebuffer */
 
__u32 x, y; /**< Position on the framebuffer */
 
__u32 gamma_size;
__u32 mode_valid;
struct drm_mode_modeinfo mode;
};
 
#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
 
/* Planes blend with or override other bits on the CRTC */
struct drm_mode_set_plane {
__u32 plane_id;
__u32 crtc_id;
__u32 fb_id; /* fb object contains surface format type */
__u32 flags; /* see above flags */
 
/* Signed dest location allows it to be partially off screen */
__s32 crtc_x, crtc_y;
__u32 crtc_w, crtc_h;
 
/* Source values are 16.16 fixed point */
__u32 src_x, src_y;
__u32 src_h, src_w;
};
 
struct drm_mode_get_plane {
__u32 plane_id;
 
__u32 crtc_id;
__u32 fb_id;
 
__u32 possible_crtcs;
__u32 gamma_size;
 
__u32 count_format_types;
__u64 format_type_ptr;
};
 
struct drm_mode_get_plane_res {
__u64 plane_id_ptr;
__u32 count_planes;
};
 
#define DRM_MODE_ENCODER_NONE 0
#define DRM_MODE_ENCODER_DAC 1
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
#define DRM_MODE_ENCODER_VIRTUAL 5
#define DRM_MODE_ENCODER_DSI 6
#define DRM_MODE_ENCODER_DPMST 7
 
struct drm_mode_get_encoder {
__u32 encoder_id;
__u32 encoder_type;
 
__u32 crtc_id; /**< Id of crtc */
 
__u32 possible_crtcs;
__u32 possible_clones;
};
 
/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
#define DRM_MODE_SUBCONNECTOR_Automatic 0
#define DRM_MODE_SUBCONNECTOR_Unknown 0
#define DRM_MODE_SUBCONNECTOR_DVID 3
#define DRM_MODE_SUBCONNECTOR_DVIA 4
#define DRM_MODE_SUBCONNECTOR_Composite 5
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
#define DRM_MODE_SUBCONNECTOR_Component 8
#define DRM_MODE_SUBCONNECTOR_SCART 9
 
#define DRM_MODE_CONNECTOR_Unknown 0
#define DRM_MODE_CONNECTOR_VGA 1
#define DRM_MODE_CONNECTOR_DVII 2
#define DRM_MODE_CONNECTOR_DVID 3
#define DRM_MODE_CONNECTOR_DVIA 4
#define DRM_MODE_CONNECTOR_Composite 5
#define DRM_MODE_CONNECTOR_SVIDEO 6
#define DRM_MODE_CONNECTOR_LVDS 7
#define DRM_MODE_CONNECTOR_Component 8
#define DRM_MODE_CONNECTOR_9PinDIN 9
#define DRM_MODE_CONNECTOR_DisplayPort 10
#define DRM_MODE_CONNECTOR_HDMIA 11
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_CONNECTOR_TV 13
#define DRM_MODE_CONNECTOR_eDP 14
#define DRM_MODE_CONNECTOR_VIRTUAL 15
#define DRM_MODE_CONNECTOR_DSI 16
 
struct drm_mode_get_connector {
 
__u64 encoders_ptr;
__u64 modes_ptr;
__u64 props_ptr;
__u64 prop_values_ptr;
 
__u32 count_modes;
__u32 count_props;
__u32 count_encoders;
 
__u32 encoder_id; /**< Current Encoder */
__u32 connector_id; /**< Id */
__u32 connector_type;
__u32 connector_type_id;
 
__u32 connection;
__u32 mm_width, mm_height; /**< width x height in millimeters */
__u32 subpixel;
 
__u32 pad;
};
 
#define DRM_MODE_PROP_PENDING (1<<0)
#define DRM_MODE_PROP_RANGE (1<<1)
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
 
/* non-extended types: legacy bitmask, one bit per type: */
#define DRM_MODE_PROP_LEGACY_TYPE ( \
DRM_MODE_PROP_RANGE | \
DRM_MODE_PROP_ENUM | \
DRM_MODE_PROP_BLOB | \
DRM_MODE_PROP_BITMASK)
 
/* extended-types: rather than continue to consume a bit per type,
* grab a chunk of the bits to use as integer type id.
*/
#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0
#define DRM_MODE_PROP_TYPE(n) ((n) << 6)
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1)
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2)
 
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
 
struct drm_mode_get_property {
__u64 values_ptr; /* values and blob lengths */
__u64 enum_blob_ptr; /* enum and blob id ptrs */
 
__u32 prop_id;
__u32 flags;
char name[DRM_PROP_NAME_LEN];
 
__u32 count_values;
__u32 count_enum_blobs;
};
 
struct drm_mode_connector_set_property {
__u64 value;
__u32 prop_id;
__u32 connector_id;
};
 
struct drm_mode_obj_get_properties {
__u64 props_ptr;
__u64 prop_values_ptr;
__u32 count_props;
__u32 obj_id;
__u32 obj_type;
};
 
struct drm_mode_obj_set_property {
__u64 value;
__u32 prop_id;
__u32 obj_id;
__u32 obj_type;
};
 
struct drm_mode_get_blob {
__u32 blob_id;
__u32 length;
__u64 data;
};
 
struct drm_mode_fb_cmd {
__u32 fb_id;
__u32 width, height;
__u32 pitch;
__u32 bpp;
__u32 depth;
/* driver specific handle */
__u32 handle;
};
 
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
 
struct drm_mode_fb_cmd2 {
__u32 fb_id;
__u32 width, height;
__u32 pixel_format; /* fourcc code from drm_fourcc.h */
__u32 flags; /* see above flags */
 
/*
* In case of planar formats, this ioctl allows up to 4
* buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
* e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
*
* YUV 4:2:0 image with a plane of 8 bit Y samples
* followed by an interleaved U/V plane containing
* 8 bit 2x2 subsampled colour difference samples.
*
* So it would consist of Y as offset[0] and UV as
* offset[1]. Note that offset[0] will generally
* be 0.
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
};
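 
/*
 * Hedged sketch: filling drm_mode_fb_cmd2 for an NV12 buffer where both
 * planes live in one GEM object "bo" ("w" and "h" are assumed; the
 * ADDFB2 ioctl itself is in the full header).
 */
static inline void example_fill_nv12(struct drm_mode_fb_cmd2 *fb,
				     __u32 bo, __u32 w, __u32 h)
{
	fb->width = w;
	fb->height = h;
	fb->pixel_format = DRM_FORMAT_NV12;	/* from drm_fourcc.h */
	fb->handles[0] = bo;	fb->offsets[0] = 0;	fb->pitches[0] = w;
	/* interleaved 2x2-subsampled CbCr plane follows the Y plane */
	fb->handles[1] = bo;	fb->offsets[1] = w * h;	fb->pitches[1] = w;
}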
 
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
#define DRM_MODE_FB_DIRTY_FLAGS 0x03
 
#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
 
/*
* Mark a region of a framebuffer as dirty.
*
* Some hardware does not automatically update display contents
* as hardware or software draws to a framebuffer. This ioctl
* allows userspace to tell the kernel and the hardware what
* regions of the framebuffer have changed.
*
* The kernel or hardware is free to update more than just the
* region specified by the clip rects. The kernel or hardware
* may also delay and/or coalesce several calls to dirty into a
* single update.
*
* Userspace may annotate the updates; the annotations are a
* promise made by the caller that the change is either a copy
* of pixels or a fill of a single color in the region specified.
*
* If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
* the number of updated regions is half of num_clips given,
* where the clip rects are paired in src and dst. The width and
* height of each one of the pairs must match.
*
* If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller
* promises that the region specified of the clip rects is filled
* completely with a single color as given in the color argument.
*/
 
struct drm_mode_fb_dirty_cmd {
__u32 fb_id;
__u32 flags;
__u32 color;
__u32 num_clips;
__u64 clips_ptr;
};
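 
/*
 * Hedged sketch: flush one damaged 64x64 rectangle ("fd" and "fb_id"
 * are assumed; DRM_IOCTL_MODE_DIRTYFB and struct drm_clip_rect come
 * from the full headers).
 */
static inline void example_flush_rect(int fd, __u32 fb_id)
{
	struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
	struct drm_mode_fb_dirty_cmd dirty = {
		.fb_id = fb_id,
		.num_clips = 1,
		.clips_ptr = (__u64)(unsigned long)&clip,
	};
 
	ioctl(fd, DRM_IOCTL_MODE_DIRTYFB, &dirty);
}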
 
struct drm_mode_mode_cmd {
__u32 connector_id;
struct drm_mode_modeinfo mode;
};
 
#define DRM_MODE_CURSOR_BO 0x01
#define DRM_MODE_CURSOR_MOVE 0x02
#define DRM_MODE_CURSOR_FLAGS 0x03
 
/*
* Depending on the value in flags, different members are used.
*
* CURSOR_BO uses
* crtc_id
* width
* height
* handle - if 0 turns the cursor off
*
* CURSOR_MOVE uses
* crtc_id
* x
* y
*/
struct drm_mode_cursor {
__u32 flags;
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
__u32 handle;
};
 
struct drm_mode_cursor2 {
__u32 flags;
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
__u32 handle;
__s32 hot_x;
__s32 hot_y;
};
 
struct drm_mode_crtc_lut {
__u32 crtc_id;
__u32 gamma_size;
 
/* pointers to arrays */
__u64 red;
__u64 green;
__u64 blue;
};
 
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
 
/*
* Request a page flip on the specified crtc.
*
* This ioctl will ask KMS to schedule a page flip for the specified
* crtc. Once any pending rendering targeting the specified fb (as of
* ioctl time) has completed, the crtc will be reprogrammed to display
* that fb after the next vertical refresh. The ioctl returns
* immediately, but subsequent rendering to the current fb will block
* in the execbuffer ioctl until the page flip happens. If a page
* flip is already pending as the ioctl is called, EBUSY will be
* returned.
*
* Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank
* event (see drm.h: struct drm_event_vblank) when the page flip is
* done. The user_data field passed in with this ioctl will be
* returned as the user_data field in the vblank event struct.
*
* Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen
* 'as soon as possible', meaning that it does not delay waiting for vblank.
* This may cause tearing on the screen.
*
* The reserved field must be zero until we figure out something
* clever to use it for.
*/
 
struct drm_mode_crtc_page_flip {
__u32 crtc_id;
__u32 fb_id;
__u32 flags;
__u32 reserved;
__u64 user_data;
};
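 
/*
 * Hedged sketch: schedule a flip to "new_fb" and request a vblank event
 * that echoes "state" back (DRM_IOCTL_MODE_PAGE_FLIP is assumed from
 * the full header; EBUSY from the call means a flip is already pending).
 */
static inline int example_queue_flip(int fd, __u32 crtc, __u32 new_fb,
				     void *state)
{
	struct drm_mode_crtc_page_flip flip = {
		.crtc_id = crtc,
		.fb_id = new_fb,
		.flags = DRM_MODE_PAGE_FLIP_EVENT,
		.user_data = (__u64)(unsigned long)state,
	};
 
	return ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
}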
 
/* create a dumb scanout buffer */
struct drm_mode_create_dumb {
uint32_t height;
uint32_t width;
uint32_t bpp;
uint32_t flags;
/* handle, pitch, size will be returned */
uint32_t handle;
uint32_t pitch;
uint64_t size;
};
 
/* set up for mmap of a dumb scanout buffer */
struct drm_mode_map_dumb {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
};
 
struct drm_mode_destroy_dumb {
uint32_t handle;
};
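 
/*
 * Hedged sketch of the create/map/mmap dance for a dumb buffer; the two
 * ioctl numbers (DRM_IOCTL_MODE_CREATE_DUMB / DRM_IOCTL_MODE_MAP_DUMB)
 * and mmap() are assumed from the full headers.
 */
static inline void *example_map_dumb(int fd, uint32_t w, uint32_t h)
{
	struct drm_mode_create_dumb creq = { .width = w, .height = h, .bpp = 32 };
	struct drm_mode_map_dumb mreq;
 
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) != 0)
		return NULL;		/* handle/pitch/size stay unset */
 
	mreq = (struct drm_mode_map_dumb){ .handle = creq.handle };
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq) != 0)
		return NULL;
 
	return mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, mreq.offset);	/* the fake offset */
}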
 
#endif
/drivers/include/linux/uapi/drm/i915_drm.h
223,6 → 223,7
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
#define DRM_I915_GEM_USERPTR 0x33
 
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
273,6 → 274,7
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
 
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
337,6 → 339,7
#define I915_PARAM_HAS_EXEC_NO_RELOC 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
#define I915_PARAM_HAS_WT 27
#define I915_PARAM_CMD_PARSER_VERSION 28
 
typedef struct drm_i915_getparam {
int param;
1049,6 → 1052,20
__u32 pad;
};
 
struct drm_i915_gem_userptr {
__u64 user_ptr;
__u64 user_size;
__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
};
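 
/*
 * Hedged sketch: wrap an existing page-aligned allocation in a GEM
 * object via DRM_IOCTL_I915_GEM_USERPTR (defined above); "buf" and
 * "len" must both be page-aligned.
 */
static inline __u32 example_wrap_userptr(int fd, void *buf, __u64 len)
{
	struct drm_i915_gem_userptr up = {
		.user_ptr = (__u64)(unsigned long)buf,
		.user_size = len,
	};
 
	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &up) != 0)
		return 0;	/* handles are nonzero, so 0 means failure */
	return up.handle;
}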
 
struct drm_i915_mask {
__u32 handle;
__u32 width;
/drivers/include/linux/uapi/drm/radeon_drm.h
0,0 → 1,1040
/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*-
*
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Fremont, California.
* Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* Keith Whitwell <keith@tungstengraphics.com>
*/
 
#ifndef __RADEON_DRM_H__
#define __RADEON_DRM_H__
 
#include <drm/drm.h>
 
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h)
*/
#ifndef __RADEON_SAREA_DEFINES__
#define __RADEON_SAREA_DEFINES__
 
/* Old style state flags, required for sarea interface (1.1 and 1.2
* clears) and 1.2 drm_vertex2 ioctl.
*/
#define RADEON_UPLOAD_CONTEXT 0x00000001
#define RADEON_UPLOAD_VERTFMT 0x00000002
#define RADEON_UPLOAD_LINE 0x00000004
#define RADEON_UPLOAD_BUMPMAP 0x00000008
#define RADEON_UPLOAD_MASKS 0x00000010
#define RADEON_UPLOAD_VIEWPORT 0x00000020
#define RADEON_UPLOAD_SETUP 0x00000040
#define RADEON_UPLOAD_TCL 0x00000080
#define RADEON_UPLOAD_MISC 0x00000100
#define RADEON_UPLOAD_TEX0 0x00000200
#define RADEON_UPLOAD_TEX1 0x00000400
#define RADEON_UPLOAD_TEX2 0x00000800
#define RADEON_UPLOAD_TEX0IMAGES 0x00001000
#define RADEON_UPLOAD_TEX1IMAGES 0x00002000
#define RADEON_UPLOAD_TEX2IMAGES 0x00004000
#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */
#define RADEON_REQUIRE_QUIESCENCE 0x00010000
#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */
#define RADEON_UPLOAD_ALL 0x003effff
#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff
 
/* New style per-packet identifiers for use in cmd_buffer ioctl with
* the RADEON_EMIT_PACKET command. Comments relate new packets to old
* state bits and the packet size:
*/
#define RADEON_EMIT_PP_MISC 0 /* context/7 */
#define RADEON_EMIT_PP_CNTL 1 /* context/3 */
#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */
#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */
#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */
#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */
#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */
#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */
#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */
#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */
#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */
#define RADEON_EMIT_RE_MISC 11 /* misc/1 */
#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */
#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */
#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */
#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */
#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */
#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */
#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */
#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */
#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */
#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */
#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */
#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */
#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */
#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */
#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */
#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */
#define R200_EMIT_TFACTOR_0 30 /* tf/7 */
#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */
#define R200_EMIT_VAP_CTL 32 /* vap/1 */
#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */
#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */
#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */
#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */
#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */
#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */
#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */
#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */
#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */
#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */
#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */
#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */
#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */
#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */
#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */
#define R200_EMIT_VTE_CNTL 48 /* vte/1 */
#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */
#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */
#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */
#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */
#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */
#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */
#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */
#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */
#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */
#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */
#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */
#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */
#define R200_EMIT_PP_CUBIC_FACES_0 61
#define R200_EMIT_PP_CUBIC_OFFSETS_0 62
#define R200_EMIT_PP_CUBIC_FACES_1 63
#define R200_EMIT_PP_CUBIC_OFFSETS_1 64
#define R200_EMIT_PP_CUBIC_FACES_2 65
#define R200_EMIT_PP_CUBIC_OFFSETS_2 66
#define R200_EMIT_PP_CUBIC_FACES_3 67
#define R200_EMIT_PP_CUBIC_OFFSETS_3 68
#define R200_EMIT_PP_CUBIC_FACES_4 69
#define R200_EMIT_PP_CUBIC_OFFSETS_4 70
#define R200_EMIT_PP_CUBIC_FACES_5 71
#define R200_EMIT_PP_CUBIC_OFFSETS_5 72
#define RADEON_EMIT_PP_TEX_SIZE_0 73
#define RADEON_EMIT_PP_TEX_SIZE_1 74
#define RADEON_EMIT_PP_TEX_SIZE_2 75
#define R200_EMIT_RB3D_BLENDCOLOR 76
#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77
#define RADEON_EMIT_PP_CUBIC_FACES_0 78
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79
#define RADEON_EMIT_PP_CUBIC_FACES_1 80
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81
#define RADEON_EMIT_PP_CUBIC_FACES_2 82
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83
#define R200_EMIT_PP_TRI_PERF_CNTL 84
#define R200_EMIT_PP_AFS_0 85
#define R200_EMIT_PP_AFS_1 86
#define R200_EMIT_ATF_TFACTOR 87
#define R200_EMIT_PP_TXCTLALL_0 88
#define R200_EMIT_PP_TXCTLALL_1 89
#define R200_EMIT_PP_TXCTLALL_2 90
#define R200_EMIT_PP_TXCTLALL_3 91
#define R200_EMIT_PP_TXCTLALL_4 92
#define R200_EMIT_PP_TXCTLALL_5 93
#define R200_EMIT_VAP_PVS_CNTL 94
#define RADEON_MAX_STATE_PACKETS 95
 
/* Commands understood by cmd_buffer ioctl. More can be added but
* obviously these can't be removed or changed:
*/
#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */
#define RADEON_CMD_SCALARS 2 /* emit scalar data */
#define RADEON_CMD_VECTORS 3 /* emit vector data */
#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */
#define RADEON_CMD_PACKET3 5 /* emit hw packet */
#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */
#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */
#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note:
* doesn't make the cpu wait, just
* the graphics hardware */
#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */
 
typedef union {
int i;
struct {
unsigned char cmd_type, pad0, pad1, pad2;
} header;
struct {
unsigned char cmd_type, packet_id, pad0, pad1;
} packet;
struct {
unsigned char cmd_type, offset, stride, count;
} scalars;
struct {
unsigned char cmd_type, offset, stride, count;
} vectors;
struct {
unsigned char cmd_type, addr_lo, addr_hi, count;
} veclinear;
struct {
unsigned char cmd_type, buf_idx, pad0, pad1;
} dma;
struct {
unsigned char cmd_type, flags, pad0, pad1;
} wait;
} drm_radeon_cmd_header_t;
 
#define RADEON_WAIT_2D 0x1
#define RADEON_WAIT_3D 0x2
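 
/*
 * Hedged sketch: the union above overlays one 32-bit word; this builds
 * a hardware-wait command that drains both engines before the CP
 * continues (it does not block the CPU).
 */
static inline drm_radeon_cmd_header_t example_wait_cmd(void)
{
	drm_radeon_cmd_header_t h = { .i = 0 };
 
	h.wait.cmd_type = RADEON_CMD_WAIT;
	h.wait.flags = RADEON_WAIT_2D | RADEON_WAIT_3D;
	return h;
}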
 
/* Allowed parameters for R300_CMD_PACKET3
*/
#define R300_CMD_PACKET3_CLEAR 0
#define R300_CMD_PACKET3_RAW 1
 
/* Commands understood by cmd_buffer ioctl for R300.
* The interface has not been stabilized, so some of these may be removed
* and eventually reordered before stabilization.
*/
#define R300_CMD_PACKET0 1
#define R300_CMD_VPU 2 /* emit vertex program upload */
#define R300_CMD_PACKET3 3 /* emit a packet3 */
#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */
#define R300_CMD_CP_DELAY 5
#define R300_CMD_DMA_DISCARD 6
#define R300_CMD_WAIT 7
# define R300_WAIT_2D 0x1
# define R300_WAIT_3D 0x2
/* these two defines are DOING IT WRONG - however
* we have userspace which relies on using these.
* The wait interface is kept for backwards compatibility;
* new code should use the NEW_WAIT defines below.
* THESE ARE NOT BIT FIELDS
*/
# define R300_WAIT_2D_CLEAN 0x3
# define R300_WAIT_3D_CLEAN 0x4
 
# define R300_NEW_WAIT_2D_3D 0x3
# define R300_NEW_WAIT_2D_2D_CLEAN 0x4
# define R300_NEW_WAIT_3D_3D_CLEAN 0x6
# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8
 
#define R300_CMD_SCRATCH 8
#define R300_CMD_R500FP 9
 
typedef union {
unsigned int u;
struct {
unsigned char cmd_type, pad0, pad1, pad2;
} header;
struct {
unsigned char cmd_type, count, reglo, reghi;
} packet0;
struct {
unsigned char cmd_type, count, adrlo, adrhi;
} vpu;
struct {
unsigned char cmd_type, packet, pad0, pad1;
} packet3;
struct {
unsigned char cmd_type, packet;
unsigned short count; /* amount of packet2 to emit */
} delay;
struct {
unsigned char cmd_type, buf_idx, pad0, pad1;
} dma;
struct {
unsigned char cmd_type, flags, pad0, pad1;
} wait;
struct {
unsigned char cmd_type, reg, n_bufs, flags;
} scratch;
struct {
unsigned char cmd_type, count, adrlo, adrhi_flags;
} r500fp;
} drm_r300_cmd_header_t;
 
#define RADEON_FRONT 0x1
#define RADEON_BACK 0x2
#define RADEON_DEPTH 0x4
#define RADEON_STENCIL 0x8
#define RADEON_CLEAR_FASTZ 0x80000000
#define RADEON_USE_HIERZ 0x40000000
#define RADEON_USE_COMP_ZBUF 0x20000000
 
#define R500FP_CONSTANT_TYPE (1 << 1)
#define R500FP_CONSTANT_CLAMP (1 << 2)
 
/* Primitive types
*/
#define RADEON_POINTS 0x1
#define RADEON_LINES 0x2
#define RADEON_LINE_STRIP 0x3
#define RADEON_TRIANGLES 0x4
#define RADEON_TRIANGLE_FAN 0x5
#define RADEON_TRIANGLE_STRIP 0x6
 
/* Vertex/indirect buffer size
*/
#define RADEON_BUFFER_SIZE 65536
 
/* Byte offsets for indirect buffer data
*/
#define RADEON_INDEX_PRIM_OFFSET 20
 
#define RADEON_SCRATCH_REG_OFFSET 32
 
#define R600_SCRATCH_REG_OFFSET 256
 
#define RADEON_NR_SAREA_CLIPRECTS 12
 
/* There are 2 heaps (local/GART). Each region within a heap is a
* minimum of 64k, and there are at most 64 of them per heap.
*/
#define RADEON_LOCAL_TEX_HEAP 0
#define RADEON_GART_TEX_HEAP 1
#define RADEON_NR_TEX_HEAPS 2
#define RADEON_NR_TEX_REGIONS 64
#define RADEON_LOG_TEX_GRANULARITY 16
 
#define RADEON_MAX_TEXTURE_LEVELS 12
#define RADEON_MAX_TEXTURE_UNITS 3
 
#define RADEON_MAX_SURFACES 8
 
/* Blits have strict offset rules. All blit offsets must be aligned on
* a 1K-byte boundary.
*/
#define RADEON_OFFSET_SHIFT 10
#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT)
#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1)
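 
/*
 * A sketch of the intended use: split an arbitrary byte offset into the
 * 1K-aligned base the blitter accepts plus a remainder to be absorbed
 * into the blit coordinates.
 */
static inline unsigned int example_blit_base(unsigned int offset)
{
	return offset & ~RADEON_OFFSET_MASK;	/* remainder is
						 * offset & RADEON_OFFSET_MASK */
}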
 
#endif /* __RADEON_SAREA_DEFINES__ */
 
typedef struct {
unsigned int red;
unsigned int green;
unsigned int blue;
unsigned int alpha;
} radeon_color_regs_t;
 
typedef struct {
/* Context state */
unsigned int pp_misc; /* 0x1c14 */
unsigned int pp_fog_color;
unsigned int re_solid_color;
unsigned int rb3d_blendcntl;
unsigned int rb3d_depthoffset;
unsigned int rb3d_depthpitch;
unsigned int rb3d_zstencilcntl;
 
unsigned int pp_cntl; /* 0x1c38 */
unsigned int rb3d_cntl;
unsigned int rb3d_coloroffset;
unsigned int re_width_height;
unsigned int rb3d_colorpitch;
unsigned int se_cntl;
 
/* Vertex format state */
unsigned int se_coord_fmt; /* 0x1c50 */
 
/* Line state */
unsigned int re_line_pattern; /* 0x1cd0 */
unsigned int re_line_state;
 
unsigned int se_line_width; /* 0x1db8 */
 
/* Bumpmap state */
unsigned int pp_lum_matrix; /* 0x1d00 */
 
unsigned int pp_rot_matrix_0; /* 0x1d58 */
unsigned int pp_rot_matrix_1;
 
/* Mask state */
unsigned int rb3d_stencilrefmask; /* 0x1d7c */
unsigned int rb3d_ropcntl;
unsigned int rb3d_planemask;
 
/* Viewport state */
unsigned int se_vport_xscale; /* 0x1d98 */
unsigned int se_vport_xoffset;
unsigned int se_vport_yscale;
unsigned int se_vport_yoffset;
unsigned int se_vport_zscale;
unsigned int se_vport_zoffset;
 
/* Setup state */
unsigned int se_cntl_status; /* 0x2140 */
 
/* Misc state */
unsigned int re_top_left; /* 0x26c0 */
unsigned int re_misc;
} drm_radeon_context_regs_t;
 
typedef struct {
/* Zbias state */
unsigned int se_zbias_factor; /* 0x1dac */
unsigned int se_zbias_constant;
} drm_radeon_context2_regs_t;
 
/* Setup registers for each texture unit
*/
typedef struct {
unsigned int pp_txfilter;
unsigned int pp_txformat;
unsigned int pp_txoffset;
unsigned int pp_txcblend;
unsigned int pp_txablend;
unsigned int pp_tfactor;
unsigned int pp_border_color;
} drm_radeon_texture_regs_t;
 
typedef struct {
unsigned int start;
unsigned int finish;
unsigned int prim:8;
unsigned int stateidx:8;
unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
unsigned int vc_format; /* vertex format */
} drm_radeon_prim_t;
 
typedef struct {
drm_radeon_context_regs_t context;
drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
drm_radeon_context2_regs_t context2;
unsigned int dirty;
} drm_radeon_state_t;
 
typedef struct {
/* The channel for communication of state information to the
* kernel on firing a vertex buffer with either of the
* obsoleted vertex/index ioctls.
*/
drm_radeon_context_regs_t context_state;
drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
unsigned int dirty;
unsigned int vertsize;
unsigned int vc_format;
 
/* The current cliprects, or a subset thereof.
*/
struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
unsigned int nbox;
 
/* Counters for client-side throttling of rendering clients.
*/
unsigned int last_frame;
unsigned int last_dispatch;
unsigned int last_clear;
 
struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
1];
unsigned int tex_age[RADEON_NR_TEX_HEAPS];
int ctx_owner;
int pfState; /* number of 3d windows (0, 1, 2 or more) */
int pfCurrentPage; /* which buffer is being displayed? */
int crtc2_base; /* CRTC2 frame offset */
int tiling_enabled; /* set by drm, read by 2d + 3d clients */
} drm_radeon_sarea_t;
 
/* WARNING: If you change any of these defines, make sure to change the
* defines in the Xserver file (xf86drmRadeon.h)
*
* KW: actually it's illegal to change any of this (backwards compatibility).
*/
 
/* Radeon specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_RADEON_CP_INIT 0x00
#define DRM_RADEON_CP_START 0x01
#define DRM_RADEON_CP_STOP 0x02
#define DRM_RADEON_CP_RESET 0x03
#define DRM_RADEON_CP_IDLE 0x04
#define DRM_RADEON_RESET 0x05
#define DRM_RADEON_FULLSCREEN 0x06
#define DRM_RADEON_SWAP 0x07
#define DRM_RADEON_CLEAR 0x08
#define DRM_RADEON_VERTEX 0x09
#define DRM_RADEON_INDICES 0x0A
#define DRM_RADEON_NOT_USED
#define DRM_RADEON_STIPPLE 0x0C
#define DRM_RADEON_INDIRECT 0x0D
#define DRM_RADEON_TEXTURE 0x0E
#define DRM_RADEON_VERTEX2 0x0F
#define DRM_RADEON_CMDBUF 0x10
#define DRM_RADEON_GETPARAM 0x11
#define DRM_RADEON_FLIP 0x12
#define DRM_RADEON_ALLOC 0x13
#define DRM_RADEON_FREE 0x14
#define DRM_RADEON_INIT_HEAP 0x15
#define DRM_RADEON_IRQ_EMIT 0x16
#define DRM_RADEON_IRQ_WAIT 0x17
#define DRM_RADEON_CP_RESUME 0x18
#define DRM_RADEON_SETPARAM 0x19
#define DRM_RADEON_SURF_ALLOC 0x1a
#define DRM_RADEON_SURF_FREE 0x1b
/* KMS ioctl */
#define DRM_RADEON_GEM_INFO 0x1c
#define DRM_RADEON_GEM_CREATE 0x1d
#define DRM_RADEON_GEM_MMAP 0x1e
#define DRM_RADEON_GEM_PREAD 0x21
#define DRM_RADEON_GEM_PWRITE 0x22
#define DRM_RADEON_GEM_SET_DOMAIN 0x23
#define DRM_RADEON_GEM_WAIT_IDLE 0x24
#define DRM_RADEON_CS 0x26
#define DRM_RADEON_INFO 0x27
#define DRM_RADEON_GEM_SET_TILING 0x28
#define DRM_RADEON_GEM_GET_TILING 0x29
#define DRM_RADEON_GEM_BUSY 0x2a
#define DRM_RADEON_GEM_VA 0x2b
#define DRM_RADEON_GEM_OP 0x2c
 
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET)
#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP)
#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP)
#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
/* KMS */
#define DRM_IOCTL_RADEON_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
#define DRM_IOCTL_RADEON_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
#define DRM_IOCTL_RADEON_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
#define DRM_IOCTL_RADEON_GEM_PREAD DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
#define DRM_IOCTL_RADEON_GEM_PWRITE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
#define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
#define DRM_IOCTL_RADEON_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
#define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
#define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op)
 
typedef struct drm_radeon_init {
enum {
RADEON_INIT_CP = 0x01,
RADEON_CLEANUP_CP = 0x02,
RADEON_INIT_R200_CP = 0x03,
RADEON_INIT_R300_CP = 0x04,
RADEON_INIT_R600_CP = 0x05
} func;
unsigned long sarea_priv_offset;
int is_pci;
int cp_mode;
int gart_size;
int ring_size;
int usec_timeout;
 
unsigned int fb_bpp;
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp;
unsigned int depth_offset, depth_pitch;
 
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long ring_rptr_offset;
unsigned long buffers_offset;
unsigned long gart_textures_offset;
} drm_radeon_init_t;
 
typedef struct drm_radeon_cp_stop {
int flush;
int idle;
} drm_radeon_cp_stop_t;
 
typedef struct drm_radeon_fullscreen {
enum {
RADEON_INIT_FULLSCREEN = 0x01,
RADEON_CLEANUP_FULLSCREEN = 0x02
} func;
} drm_radeon_fullscreen_t;
 
#define CLEAR_X1 0
#define CLEAR_Y1 1
#define CLEAR_X2 2
#define CLEAR_Y2 3
#define CLEAR_DEPTH 4
 
typedef union drm_radeon_clear_rect {
float f[5];
unsigned int ui[5];
} drm_radeon_clear_rect_t;
 
typedef struct drm_radeon_clear {
unsigned int flags;
unsigned int clear_color;
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask; /* misnamed field: should be stencil */
drm_radeon_clear_rect_t __user *depth_boxes;
} drm_radeon_clear_t;
 
typedef struct drm_radeon_vertex {
int prim;
int idx; /* Index of vertex buffer */
int count; /* Number of vertices in buffer */
int discard; /* Client finished with buffer? */
} drm_radeon_vertex_t;
 
typedef struct drm_radeon_indices {
int prim;
int idx;
int start;
int end;
int discard; /* Client finished with buffer? */
} drm_radeon_indices_t;
 
/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
* - allows multiple primitives and state changes in a single ioctl
* - supports driver change to emit native primitives
*/
typedef struct drm_radeon_vertex2 {
int idx; /* Index of vertex buffer */
int discard; /* Client finished with buffer? */
int nr_states;
drm_radeon_state_t __user *state;
int nr_prims;
drm_radeon_prim_t __user *prim;
} drm_radeon_vertex2_t;
 
/* v1.3 - obsoletes drm_radeon_vertex2
* - allows arbitrarily large cliprect list
* - allows updating of tcl packet, vector and scalar state
* - allows memory-efficient description of state updates
* - allows state to be emitted without a primitive
* (for clears, ctx switches)
* - allows more than one dma buffer to be referenced per ioctl
* - supports tcl driver
* - may be extended in future versions with new cmd types, packets
*/
typedef struct drm_radeon_cmd_buffer {
int bufsz;
char __user *buf;
int nbox;
struct drm_clip_rect __user *boxes;
} drm_radeon_cmd_buffer_t;
 
typedef struct drm_radeon_tex_image {
unsigned int x, y; /* Blit coordinates */
unsigned int width, height;
const void __user *data;
} drm_radeon_tex_image_t;
 
typedef struct drm_radeon_texture {
unsigned int offset;
int pitch;
int format;
int width; /* Texture image coordinates */
int height;
drm_radeon_tex_image_t __user *image;
} drm_radeon_texture_t;
 
typedef struct drm_radeon_stipple {
unsigned int __user *mask;
} drm_radeon_stipple_t;
 
typedef struct drm_radeon_indirect {
int idx;
int start;
int end;
int discard;
} drm_radeon_indirect_t;
 
/* enum for card type parameters */
#define RADEON_CARD_PCI 0
#define RADEON_CARD_AGP 1
#define RADEON_CARD_PCIE 2
 
/* 1.3: An ioctl to get parameters that aren't available to the 3d
* client any other way.
*/
#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */
#define RADEON_PARAM_LAST_FRAME 2
#define RADEON_PARAM_LAST_DISPATCH 3
#define RADEON_PARAM_LAST_CLEAR 4
/* Added with DRM version 1.6. */
#define RADEON_PARAM_IRQ_NR 5
#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */
/* Added with DRM version 1.8. */
#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
#define RADEON_PARAM_STATUS_HANDLE 8
#define RADEON_PARAM_SAREA_HANDLE 9
#define RADEON_PARAM_GART_TEX_HANDLE 10
#define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
#define RADEON_PARAM_DEVICE_ID 16
#define RADEON_PARAM_NUM_Z_PIPES 17 /* num Z pipes */
 
typedef struct drm_radeon_getparam {
int param;
void __user *value;
} drm_radeon_getparam_t;
 
/* 1.6: Set up a memory manager for regions of shared memory:
*/
#define RADEON_MEM_REGION_GART 1
#define RADEON_MEM_REGION_FB 2
 
typedef struct drm_radeon_mem_alloc {
int region;
int alignment;
int size;
int __user *region_offset; /* offset from start of fb or GART */
} drm_radeon_mem_alloc_t;
 
typedef struct drm_radeon_mem_free {
int region;
int region_offset;
} drm_radeon_mem_free_t;
 
typedef struct drm_radeon_mem_init_heap {
int region;
int size;
int start;
} drm_radeon_mem_init_heap_t;
 
/* 1.6: Userspace can request & wait on irq's:
*/
typedef struct drm_radeon_irq_emit {
int __user *irq_seq;
} drm_radeon_irq_emit_t;
 
typedef struct drm_radeon_irq_wait {
int irq_seq;
} drm_radeon_irq_wait_t;
 
/* 1.10: Clients tell the DRM where they think the framebuffer is located in
* the card's address space, via a new generic ioctl to set parameters.
*/
 
typedef struct drm_radeon_setparam {
unsigned int param;
__s64 value;
} drm_radeon_setparam_t;
 
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
/* 1.14: Clients can allocate/free a surface
*/
typedef struct drm_radeon_surface_alloc {
unsigned int address;
unsigned int size;
unsigned int flags;
} drm_radeon_surface_alloc_t;
 
typedef struct drm_radeon_surface_free {
unsigned int address;
} drm_radeon_surface_free_t;
 
#define DRM_RADEON_VBLANK_CRTC1 1
#define DRM_RADEON_VBLANK_CRTC2 2
 
/*
* Kernel modesetting world below.
*/
#define RADEON_GEM_DOMAIN_CPU 0x1
#define RADEON_GEM_DOMAIN_GTT 0x2
#define RADEON_GEM_DOMAIN_VRAM 0x4
 
struct drm_radeon_gem_info {
uint64_t gart_size;
uint64_t vram_size;
uint64_t vram_visible;
};
 
#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
#define RADEON_GEM_GTT_UC (1 << 1)
#define RADEON_GEM_GTT_WC (1 << 2)
 
struct drm_radeon_gem_create {
uint64_t size;
uint64_t alignment;
uint32_t handle;
uint32_t initial_domain;
uint32_t flags;
};
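 
/* Illustrative sketch: allocating a buffer object with the structure above.
* Assumes libdrm's drmCommandWriteRead() and the DRM_RADEON_GEM_CREATE
* request index from this header's ioctl table.
*/
static int radeon_bo_create(int fd, uint64_t size, uint32_t domain,
			    uint32_t *handle)
{
	struct drm_radeon_gem_create args;

	memset(&args, 0, sizeof(args));
	args.size = size;
	args.alignment = 4096;
	args.initial_domain = domain;	/* e.g. RADEON_GEM_DOMAIN_VRAM */
	args.flags = 0;			/* or RADEON_GEM_GTT_WC, etc. */

	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
				&args, sizeof(args)))
		return -1;
	*handle = args.handle;		/* filled in by the kernel */
	return 0;
}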
 
#define RADEON_TILING_MACRO 0x1
#define RADEON_TILING_MICRO 0x2
#define RADEON_TILING_SWAP_16BIT 0x4
#define RADEON_TILING_SWAP_32BIT 0x8
/* this object requires a surface when mapped - i.e. front buffer */
#define RADEON_TILING_SURFACE 0x10
#define RADEON_TILING_MICRO_SQUARE 0x20
#define RADEON_TILING_EG_BANKW_SHIFT 8
#define RADEON_TILING_EG_BANKW_MASK 0xf
#define RADEON_TILING_EG_BANKH_SHIFT 12
#define RADEON_TILING_EG_BANKH_MASK 0xf
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf
#define RADEON_TILING_EG_TILE_SPLIT_SHIFT 24
#define RADEON_TILING_EG_TILE_SPLIT_MASK 0xf
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf
 
struct drm_radeon_gem_set_tiling {
uint32_t handle;
uint32_t tiling_flags;
uint32_t pitch;
};
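 
/* Illustrative only: packing the Evergreen bank geometry into tiling_flags
* with the shift/mask pairs above; the bankw/bankh values are hypothetical
* and would come from the chip-specific surface setup.
*/
static uint32_t radeon_pack_tiling_flags(uint32_t bankw, uint32_t bankh)
{
	uint32_t flags = RADEON_TILING_MACRO;

	flags |= (bankw & RADEON_TILING_EG_BANKW_MASK)
			<< RADEON_TILING_EG_BANKW_SHIFT;
	flags |= (bankh & RADEON_TILING_EG_BANKH_MASK)
			<< RADEON_TILING_EG_BANKH_SHIFT;
	return flags;
}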
 
struct drm_radeon_gem_get_tiling {
uint32_t handle;
uint32_t tiling_flags;
uint32_t pitch;
};
 
struct drm_radeon_gem_mmap {
uint32_t handle;
uint32_t pad;
uint64_t offset;
uint64_t size;
uint64_t addr_ptr;
};
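 
/* Sketch of the usual two-step mapping, as done by libdrm_radeon: the ioctl
* returns a fake offset in addr_ptr, which is then passed to mmap() on the
* DRM fd. Assumes the DRM_RADEON_GEM_MMAP request index and <sys/mman.h>.
*/
static void *radeon_bo_map(int fd, uint32_t handle, uint64_t size)
{
	struct drm_radeon_gem_mmap args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.size = size;

	if (drmCommandWriteRead(fd, DRM_RADEON_GEM_MMAP, &args, sizeof(args)))
		return NULL;

	/* addr_ptr now holds the mmap offset chosen by the kernel */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, args.addr_ptr);
}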
 
struct drm_radeon_gem_set_domain {
uint32_t handle;
uint32_t read_domains;
uint32_t write_domain;
};
 
struct drm_radeon_gem_wait_idle {
uint32_t handle;
uint32_t pad;
};
 
struct drm_radeon_gem_busy {
uint32_t handle;
uint32_t domain;
};
 
struct drm_radeon_gem_pread {
/** Handle for the object being read. */
uint32_t handle;
uint32_t pad;
/** Offset into the object to read from */
uint64_t offset;
/** Length of data to read */
uint64_t size;
/** Pointer to write the data into. */
/* void *, but pointers are not 32/64 compatible */
uint64_t data_ptr;
};
 
struct drm_radeon_gem_pwrite {
/** Handle for the object being written to. */
uint32_t handle;
uint32_t pad;
/** Offset into the object to write to */
uint64_t offset;
/** Length of data to write */
uint64_t size;
/** Pointer to read the data from. */
/* void *, but pointers are not 32/64 compatible */
uint64_t data_ptr;
};
 
/* Sets or returns a value associated with a buffer. */
struct drm_radeon_gem_op {
uint32_t handle; /* buffer */
uint32_t op; /* RADEON_GEM_OP_* */
uint64_t value; /* input or return value */
};
 
#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0
#define RADEON_GEM_OP_SET_INITIAL_DOMAIN 1
 
#define RADEON_VA_MAP 1
#define RADEON_VA_UNMAP 2
 
#define RADEON_VA_RESULT_OK 0
#define RADEON_VA_RESULT_ERROR 1
#define RADEON_VA_RESULT_VA_EXIST 2
 
#define RADEON_VM_PAGE_VALID (1 << 0)
#define RADEON_VM_PAGE_READABLE (1 << 1)
#define RADEON_VM_PAGE_WRITEABLE (1 << 2)
#define RADEON_VM_PAGE_SYSTEM (1 << 3)
#define RADEON_VM_PAGE_SNOOPED (1 << 4)
 
struct drm_radeon_gem_va {
uint32_t handle;
uint32_t operation;
uint32_t vm_id;
uint32_t flags;
uint64_t offset;
};
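 
/* Illustrative sketch of mapping a BO at a GPU virtual address, modelled on
* how Mesa drives this interface; the DRM_RADEON_GEM_VA request index is
* assumed from the ioctl table. The kernel reports the RADEON_VA_RESULT_*
* status back through the operation field.
*/
static int radeon_bo_va_map(int fd, uint32_t handle, uint64_t gpu_va)
{
	struct drm_radeon_gem_va va;
	int r;

	memset(&va, 0, sizeof(va));
	va.handle = handle;
	va.operation = RADEON_VA_MAP;
	va.vm_id = 0;
	va.flags = RADEON_VM_PAGE_READABLE |
		   RADEON_VM_PAGE_WRITEABLE |
		   RADEON_VM_PAGE_SNOOPED;
	va.offset = gpu_va;

	r = drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
	if (r == 0 && va.operation == RADEON_VA_RESULT_VA_EXIST)
		return -1;	/* this BO already has a mapping */
	return r;
}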
 
#define RADEON_CHUNK_ID_RELOCS 0x01
#define RADEON_CHUNK_ID_IB 0x02
#define RADEON_CHUNK_ID_FLAGS 0x03
#define RADEON_CHUNK_ID_CONST_IB 0x04
 
/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01
#define RADEON_CS_USE_VM 0x02
#define RADEON_CS_END_OF_FRAME 0x04 /* a hint from userspace which CS is the last one */
/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
#define RADEON_CS_RING_GFX 0
#define RADEON_CS_RING_COMPUTE 1
#define RADEON_CS_RING_DMA 2
#define RADEON_CS_RING_UVD 3
#define RADEON_CS_RING_VCE 4
/* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
/* 0 = normal, + = higher priority, - = lower priority */
 
struct drm_radeon_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
uint64_t chunk_data;
};
 
/* drm_radeon_cs_reloc.flags */
 
struct drm_radeon_cs_reloc {
uint32_t handle;
uint32_t read_domains;
uint32_t write_domain;
uint32_t flags;
};
 
struct drm_radeon_cs {
uint32_t num_chunks;
uint32_t cs_id;
/* this points to an array of uint64_t, each of which points to a cs chunk */
uint64_t chunks;
/* updates to the limits after this CS ioctl */
uint64_t gart_limit;
uint64_t vram_limit;
};
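 
/* Heavily reduced sketch of a command submission using the double
* indirection described above: chunks is a user pointer to an array of
* uint64_t, each of which points to one drm_radeon_cs_chunk. Assumes the
* DRM_RADEON_CS request index; real submissions add a flags chunk, error
* handling, etc.
*/
static int radeon_submit_ib(int fd, uint32_t *ib, uint32_t ndw,
			    struct drm_radeon_cs_reloc *relocs,
			    uint32_t nrelocs)
{
	struct drm_radeon_cs_chunk chunks[2];
	uint64_t chunk_ptrs[2];
	struct drm_radeon_cs cs;

	chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
	chunks[0].length_dw = ndw;
	chunks[0].chunk_data = (uintptr_t)ib;

	chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
	chunks[1].length_dw = nrelocs * sizeof(*relocs) / sizeof(uint32_t);
	chunks[1].chunk_data = (uintptr_t)relocs;

	chunk_ptrs[0] = (uintptr_t)&chunks[0];
	chunk_ptrs[1] = (uintptr_t)&chunks[1];

	memset(&cs, 0, sizeof(cs));
	cs.num_chunks = 2;
	cs.chunks = (uintptr_t)chunk_ptrs;

	return drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
}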
 
#define RADEON_INFO_DEVICE_ID 0x00
#define RADEON_INFO_NUM_GB_PIPES 0x01
#define RADEON_INFO_NUM_Z_PIPES 0x02
#define RADEON_INFO_ACCEL_WORKING 0x03
#define RADEON_INFO_CRTC_FROM_ID 0x04
#define RADEON_INFO_ACCEL_WORKING2 0x05
#define RADEON_INFO_TILING_CONFIG 0x06
#define RADEON_INFO_WANT_HYPERZ 0x07
#define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */
#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */
#define RADEON_INFO_NUM_BACKENDS 0x0a /* DB/backends for r600+ - need for OQ */
#define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */
#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */
#define RADEON_INFO_BACKEND_MAP 0x0d /* pipe to backend map, needed by mesa */
/* virtual address start, va < start are reserved by the kernel */
#define RADEON_INFO_VA_START 0x0e
/* maximum size of ib using the virtual memory cs */
#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
/* max pipes - needed for compute shaders */
#define RADEON_INFO_MAX_PIPES 0x10
/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
#define RADEON_INFO_TIMESTAMP 0x11
/* max shader engines (SE) - needed for geometry shaders, etc. */
#define RADEON_INFO_MAX_SE 0x12
/* max SH per SE */
#define RADEON_INFO_MAX_SH_PER_SE 0x13
/* fast fb access is enabled */
#define RADEON_INFO_FASTFB_WORKING 0x14
/* query if a RADEON_CS_RING_* submission is supported */
#define RADEON_INFO_RING_WORKING 0x15
/* SI tile mode array */
#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
/* query if CP DMA is supported on the compute ring */
#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
/* CIK macrotile mode array */
#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18
/* query the number of render backends */
#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19
/* max engine clock - needed for OpenCL */
#define RADEON_INFO_MAX_SCLK 0x1a
/* version of VCE firmware */
#define RADEON_INFO_VCE_FW_VERSION 0x1b
/* version of VCE feedback */
#define RADEON_INFO_VCE_FB_VERSION 0x1c
#define RADEON_INFO_NUM_BYTES_MOVED 0x1d
#define RADEON_INFO_VRAM_USAGE 0x1e
#define RADEON_INFO_GTT_USAGE 0x1f
#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
 
struct drm_radeon_info {
uint32_t request;
uint32_t pad;
uint64_t value;
};
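 
/* Sketch of a RADEON_INFO_* query with the struct above; like getparam,
* value carries a user pointer the kernel writes through. Assumes the
* DRM_RADEON_INFO request index.
*/
static int radeon_get_info(int fd, uint32_t request, uint32_t *out)
{
	struct drm_radeon_info info;
	uint32_t value = 0;

	memset(&info, 0, sizeof(info));
	info.request = request;		/* e.g. RADEON_INFO_DEVICE_ID */
	info.value = (uintptr_t)&value;	/* kernel writes through this */

	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)))
		return -1;
	*out = value;
	return 0;
}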
 
/* These correspond to the tile index to use; this makes explicit the API
* that is otherwise only implicitly defined by the tile mode array.
*/
#define SI_TILE_MODE_COLOR_LINEAR_ALIGNED 8
#define SI_TILE_MODE_COLOR_1D 13
#define SI_TILE_MODE_COLOR_1D_SCANOUT 9
#define SI_TILE_MODE_COLOR_2D_8BPP 14
#define SI_TILE_MODE_COLOR_2D_16BPP 15
#define SI_TILE_MODE_COLOR_2D_32BPP 16
#define SI_TILE_MODE_COLOR_2D_64BPP 17
#define SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP 11
#define SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP 12
#define SI_TILE_MODE_DEPTH_STENCIL_1D 4
#define SI_TILE_MODE_DEPTH_STENCIL_2D 0
#define SI_TILE_MODE_DEPTH_STENCIL_2D_2AA 3
#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
 
#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
 
#endif
/drivers/include/linux/uapi/drm/vmwgfx_drm.h
87,8 → 87,18
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
 
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
*
*/
enum drm_vmw_handle_type {
DRM_VMW_HANDLE_LEGACY = 0,
DRM_VMW_HANDLE_PRIME = 1
};
 
/**
* struct drm_vmw_getparam_arg
*
* @value: Returned value. //Out
176,6 → 186,7
* struct drm_vmw_surface_arg
*
* @sid: Surface id of created surface or surface to destroy or reference.
* @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
*
* Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
* Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
184,7 → 195,7
 
struct drm_vmw_surface_arg {
int32_t sid;
uint32_t pad64;
enum drm_vmw_handle_type handle_type;
};
 
/**
/drivers/include/linux/wait.h
109,9 → 109,15
DestroyEvent(__wait.evnt); \
} while (0)
 
#define wait_event_interruptible(wq, condition) \
({ \
int __ret = 0; \
if (!(condition)) \
wait_event(wq, condition); \
__ret; \
})
 
 
 
static inline
void wake_up_all(wait_queue_head_t *q)
{
/drivers/include/linux/ww_mutex.h
0,0 → 1,381
/*
* Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance
*
* Original mutex implementation started by Ingo Molnar:
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
* Wound/wait implementation:
* Copyright (C) 2013 Canonical Ltd.
*
* This file contains the main data structure and API definitions.
*/
 
#ifndef __LINUX_WW_MUTEX_H
#define __LINUX_WW_MUTEX_H
 
#include <linux/mutex.h>
#include <syscall.h>
 
#define current (void*)GetPid()
 
struct ww_class {
atomic_long_t stamp;
struct lock_class_key acquire_key;
struct lock_class_key mutex_key;
const char *acquire_name;
const char *mutex_name;
};
 
struct ww_acquire_ctx {
struct task_struct *task;
unsigned long stamp;
unsigned acquired;
#ifdef CONFIG_DEBUG_MUTEXES
unsigned done_acquire;
struct ww_class *ww_class;
struct ww_mutex *contending_lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
unsigned deadlock_inject_interval;
unsigned deadlock_inject_countdown;
#endif
};
 
struct ww_mutex {
struct mutex base;
struct ww_acquire_ctx *ctx;
#ifdef CONFIG_DEBUG_MUTEXES
struct ww_class *ww_class;
#endif
};
 
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
, .ww_class = &ww_class
#else
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
#endif
 
#define __WW_CLASS_INITIALIZER(ww_class) \
{ .stamp = ATOMIC_LONG_INIT(0) \
, .acquire_name = #ww_class "_acquire" \
, .mutex_name = #ww_class "_mutex" }
 
#define __WW_MUTEX_INITIALIZER(lockname, class) \
{ .base = __MUTEX_INITIALIZER(lockname.base) \
__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
 
#define DEFINE_WW_CLASS(classname) \
struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
 
#define DEFINE_WW_MUTEX(mutexname, ww_class) \
struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
 
/**
* ww_mutex_init - initialize the w/w mutex
* @lock: the mutex to be initialized
* @ww_class: the w/w class the mutex should belong to
*
* Initialize the w/w mutex to unlocked state and associate it with the given
* class.
*
* It is not allowed to initialize an already locked mutex.
*/
static inline void ww_mutex_init(struct ww_mutex *lock,
struct ww_class *ww_class)
{
MutexInit(&lock->base);
lock->ctx = NULL;
#ifdef CONFIG_DEBUG_MUTEXES
lock->ww_class = ww_class;
#endif
}
 
/**
* ww_acquire_init - initialize a w/w acquire context
* @ctx: w/w acquire context to initialize
* @ww_class: w/w class of the context
*
* Initializes a context to acquire multiple mutexes of the given w/w class.
*
* Context-based w/w mutex acquiring can be done in any order whatsoever within
* a given lock class. Deadlocks will be detected and handled with the
* wait/wound logic.
*
* Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
* result in undetected deadlocks and is therefore forbidden. Mixing different contexts
* for the same w/w class when acquiring mutexes can also result in undetected
* deadlocks, and is hence also forbidden. Both types of abuse will be caught by
* enabling CONFIG_PROVE_LOCKING.
*
* Nesting of acquire contexts for _different_ w/w classes is possible, subject
* to the usual locking rules between different lock classes.
*
* An acquire context must be released with ww_acquire_fini by the same task
* before the memory is freed. It is recommended to allocate the context itself
* on the stack.
*/
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
struct ww_class *ww_class)
{
ctx->task = current;
ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
ctx->acquired = 0;
#ifdef CONFIG_DEBUG_MUTEXES
ctx->ww_class = ww_class;
ctx->done_acquire = 0;
ctx->contending_lock = NULL;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
&ww_class->acquire_key, 0);
mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
ctx->deadlock_inject_interval = 1;
ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
#endif
}
 
/**
* ww_acquire_done - marks the end of the acquire phase
* @ctx: the acquire context
*
* Marks the end of the acquire phase, any further w/w mutex lock calls using
* this context are forbidden.
*
* Calling this function is optional; it is just useful to document w/w mutex
* code and clearly separate the acquire phase from the actual use of the
* locked data structures.
*/
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
lockdep_assert_held(ctx);
 
DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
ctx->done_acquire = 1;
#endif
}
 
/**
* ww_acquire_fini - releases a w/w acquire context
* @ctx: the acquire context to free
*
* Releases a w/w acquire context. This must be called _after_ all acquired w/w
* mutexes have been released with ww_mutex_unlock.
*/
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
mutex_release(&ctx->dep_map, 0, _THIS_IP_);
 
DEBUG_LOCKS_WARN_ON(ctx->acquired);
if (!config_enabled(CONFIG_PROVE_LOCKING))
/*
* lockdep will normally handle this,
* but fail without it anyway
*/
ctx->done_acquire = 1;
 
if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
/* ensure ww_acquire_fini will still fail if called twice */
ctx->acquired = ~0U;
#endif
}
 
extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx);
extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx);
 
/**
* ww_mutex_lock - acquire the w/w mutex
* @lock: the mutex to be acquired
* @ctx: w/w acquire context, or NULL to acquire only a single lock.
*
* Lock the w/w mutex exclusively for this task.
*
* Deadlocks within a given w/w class of locks are detected and handled with the
* wait/wound algorithm. If the lock isn't immediately available, this function
* will either sleep until it is (wait case) or select the current context
* for backing off by returning -EDEADLK (wound case). Trying to acquire the
* same lock with the same context twice is also detected and signalled by
* returning -EALREADY. Returns 0 if the mutex was successfully acquired.
*
* In the wound case the caller must release all currently held w/w mutexes for
* the given context and then wait for this contending lock to be available by
* calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
* lock and proceed with trying to acquire further w/w mutexes (e.g. when
* scanning through lru lists trying to free resources).
*
* The mutex must later on be released by the same task that
* acquired it. The task may not exit without first unlocking the mutex. Also,
* kernel memory where the mutex resides must not be freed with the mutex still
* locked. The mutex must first be initialized (or statically defined) before it
* can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
* of the same w/w lock class as was used to initialize the acquire context.
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
if (ctx)
return __ww_mutex_lock(lock, ctx);
 
mutex_lock(&lock->base);
return 0;
}
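 
/* A minimal sketch of the documented usage pattern: try to take every lock
* in a set, and on -EDEADLK drop everything held, re-acquire the contended
* lock via the slowpath and retry. The obj structure and array layout are
* hypothetical; the caller is assumed to have done ww_acquire_init() and to
* call ww_acquire_fini() after unlocking.
*/
struct obj {
	struct ww_mutex lock;
	/* payload protected by the lock */
};

static void lock_objs(struct obj **o, int n, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *contended = NULL;
	int i;

retry:
	for (i = 0; i < n; i++) {
		int ret;

		if (&o[i]->lock == contended) {
			contended = NULL;	/* already held via slowpath */
			continue;
		}
		ret = ww_mutex_lock(&o[i]->lock, ctx);
		if (ret == -EDEADLK) {
			int j = i;

			/* wounded: back off by dropping everything we hold */
			while (j--)
				ww_mutex_unlock(&o[j]->lock);
			if (contended)
				ww_mutex_unlock(contended);
			contended = &o[i]->lock;
			ww_mutex_lock_slow(contended, ctx);
			goto retry;
		}
	}
	ww_acquire_done(ctx);
}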
 
/**
* ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
* Lock the w/w mutex exclusively for this task.
*
* Deadlocks within a given w/w class of locks are detected and handled with the
* wait/wound algorithm. If the lock isn't immediately available, this function
* will either sleep until it is (wait case) or select the current context
* for backing off by returning -EDEADLK (wound case). Trying to acquire the
* same lock with the same context twice is also detected and signalled by
* returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
* signal arrives while waiting for the lock then this function returns -EINTR.
*
* In the wound case the caller must release all currently held w/w mutexes for
* the given context and then wait for this contending lock to be available by
* calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
* not acquire this lock and proceed with trying to acquire further w/w mutexes
* (e.g. when scanning through lru lists trying to free resources).
*
* The mutex must later on be released by the same task that
* acquired it. The task may not exit without first unlocking the mutex. Also,
* kernel memory where the mutex resides must not be freed with the mutex still
* locked. The mutex must first be initialized (or statically defined) before it
* can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be
* of the same w/w lock class as was used to initialize the acquire context.
*
* A mutex acquired with this function must be released with ww_mutex_unlock.
*/
static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
if (ctx)
return __ww_mutex_lock_interruptible(lock, ctx);
else
return mutex_lock_interruptible(&lock->base);
}
 
/**
* ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
* Acquires a w/w mutex with the given context after a wound case. This function
* will sleep until the lock becomes available.
*
* The caller must have released all w/w mutexes already acquired with the
* context and then call this function on the contended lock.
*
* Afterwards the caller may continue to (re)acquire the other w/w mutexes it
* needs with ww_mutex_lock. Note that the -EALREADY return code from
* ww_mutex_lock can be used to avoid locking this contended mutex twice.
*
* It is forbidden to call this function with any other w/w mutexes associated
* with the context held. It is forbidden to call this on anything else than the
* contending mutex.
*
* Note that the slowpath lock acquiring can also be done by calling
* ww_mutex_lock directly. This function here is simply to help w/w mutex
* locking code readability by clearly denoting the slowpath.
*/
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
int ret;
#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
ret = ww_mutex_lock(lock, ctx);
(void)ret;
}
 
/**
* ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
* Acquires a w/w mutex with the given context after a wound case. This function
* will sleep until the lock becomes available and returns 0 when the lock has
* been acquired. If a signal arrives while waiting for the lock then this
* function returns -EINTR.
*
* The caller must have released all w/w mutexes already acquired with the
* context and then call this function on the contended lock.
*
* Afterwards the caller may continue to (re)acquire the other w/w mutexes it
* needs with ww_mutex_lock. Note that the -EALREADY return code from
* ww_mutex_lock can be used to avoid locking this contended mutex twice.
*
* It is forbidden to call this function with any other w/w mutexes associated
* with the given context held. It is forbidden to call this on anything else
* than the contending mutex.
*
* Note that the slowpath lock acquiring can also be done by calling
* ww_mutex_lock_interruptible directly. This function here is simply to help
* w/w mutex locking code readability by clearly denoting the slowpath.
*/
static inline int __must_check
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
return ww_mutex_lock_interruptible(lock, ctx);
}
 
extern void ww_mutex_unlock(struct ww_mutex *lock);
 
/**
* ww_mutex_trylock - tries to acquire the w/w mutex without acquire context
* @lock: mutex to lock
*
* Trylocks a mutex without acquire context, so no deadlock detection is
* possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise.
*/
static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
{
return mutex_trylock(&lock->base);
}
 
/**
* ww_mutex_destroy - mark a w/w mutex unusable
* @lock: the mutex to be destroyed
*
* This function marks the mutex uninitialized, and any subsequent
* use of the mutex is forbidden. The mutex must not be locked when
* this function is called.
*/
static inline void ww_mutex_destroy(struct ww_mutex *lock)
{
mutex_destroy(&lock->base);
}
 
/**
* ww_mutex_is_locked - is the w/w mutex locked
* @lock: the mutex to be queried
*
* Returns 1 if the mutex is locked, 0 if unlocked.
*/
static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
{
return mutex_is_locked(&lock->base);
}
 
#endif