/drivers/include/drm/radeon_drm.h |
---|
File deleted |
/drivers/include/drm/drm_mode.h |
---|
File deleted |
/drivers/include/drm/drm_fourcc.h |
---|
File deleted |
/drivers/include/drm/drmP.h |
---|
64,17 → 64,19 |
#include <linux/file.h> |
#include <linux/pci.h> |
#include <linux/jiffies.h> |
#include <linux/dma-mapping.h> |
#include <linux/irqreturn.h> |
//#include <linux/smp_lock.h> /* For (un)lock_kernel */ |
//#include <linux/dma-mapping.h> |
#include <linux/mutex.h> |
//#include <asm/io.h> |
//#include <asm/mman.h> |
#include <linux/slab.h> |
//#include <asm/uaccess.h> |
//#include <linux/workqueue.h> |
//#include <linux/poll.h> |
//#include <asm/pgalloc.h> |
#include <linux/types.h> |
#include <linux/workqueue.h> |
92,6 → 94,7 |
struct device_node; |
struct videomode; |
struct reservation_object; |
struct inode; |
struct poll_table_struct; |
117,48 → 120,41 |
#define DRM_SCANOUTPOS_INVBL (1 << 1) |
#define DRM_SCANOUTPOS_ACCURATE (1 << 2) |
#define DRM_UT_CORE 0x01 |
#define DRM_UT_DRIVER 0x02 |
#define DRM_UT_KMS 0x04 |
#define DRM_UT_PRIME 0x08 |
/* |
* Three debug levels are defined. |
* drm_core, drm_driver, drm_kms |
* drm_core level can be used in the generic drm code. For example: |
* drm_ioctl, drm_mm, drm_memory |
* The macro definition of DRM_DEBUG is used. |
* DRM_DEBUG(fmt, args...) |
* The debug info by using the DRM_DEBUG can be obtained by adding |
* the boot option of "drm.debug=1". |
* 4 debug categories are defined: |
* |
* drm_driver level can be used in the specific drm driver. It is used |
* to add the debug info related with the drm driver. For example: |
* i915_drv, i915_dma, i915_gem, radeon_drv, |
* The macro definition of DRM_DEBUG_DRIVER can be used. |
* DRM_DEBUG_DRIVER(fmt, args...) |
* The debug info by using the DRM_DEBUG_DRIVER can be obtained by |
* adding the boot option of "drm.debug=0x02" |
* CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ... |
* This is the category used by the DRM_DEBUG() macro. |
* |
* drm_kms level can be used in the KMS code related with specific drm driver. |
* It is used to add the debug info related with KMS mode. For example: |
* the connector/crtc , |
* The macro definition of DRM_DEBUG_KMS can be used. |
* DRM_DEBUG_KMS(fmt, args...) |
* The debug info by using the DRM_DEBUG_KMS can be obtained by |
* adding the boot option of "drm.debug=0x04" |
* DRIVER: Used in the vendor specific part of the driver: i915, radeon, ... |
* This is the category used by the DRM_DEBUG_DRIVER() macro. |
* |
* If we add the boot option of "drm.debug=0x06", we can get the debug info by |
* using the DRM_DEBUG_KMS and DRM_DEBUG_DRIVER. |
* If we add the boot option of "drm.debug=0x05", we can get the debug info by |
* using the DRM_DEBUG_KMS and DRM_DEBUG. |
* KMS: used in the modesetting code. |
* This is the category used by the DRM_DEBUG_KMS() macro. |
* |
* PRIME: used in the prime code. |
* This is the category used by the DRM_DEBUG_PRIME() macro. |
* |
* Enabling verbose debug messages is done through the drm.debug parameter, |
* each category being enabled by a bit. |
* |
* drm.debug=0x1 will enable CORE messages |
* drm.debug=0x2 will enable DRIVER messages |
* drm.debug=0x3 will enable CORE and DRIVER messages |
* ... |
* drm.debug=0xf will enable all messages |
* |
* An interesting feature is that it's possible to enable verbose logging at |
* run-time by echoing the debug value in its sysfs node: |
* # echo 0xf > /sys/module/drm/parameters/debug |
*/ |
#define DRM_UT_CORE 0x01 |
#define DRM_UT_DRIVER 0x02 |
#define DRM_UT_KMS 0x04 |
#define DRM_UT_PRIME 0x08 |
extern __printf(4, 5) |
void drm_ut_debug_printk(unsigned int request_level, |
const char *prefix, |
const char *function_name, |
extern __printf(2, 3) |
void drm_ut_debug_printk(const char *function_name, |
const char *format, ...); |
extern __printf(2, 3) |
int drm_err(const char *func, const char *format, ...); |
192,8 → 188,6 |
also include looping detection. */ |
#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ |
#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */ |
#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */ |
#define DRM_MAP_HASH_OFFSET 0x10000000 |
231,6 → 225,9 |
#define DRM_INFO(fmt, ...) \ |
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__) |
#define DRM_INFO_ONCE(fmt, ...) \ |
printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__) |
/** |
* Debug output. |
* |
238,49 → 235,28 |
* \param arg arguments |
*/ |
#if DRM_DEBUG_CODE |
#define DRM_DEBUG(fmt, ...) \ |
#define DRM_DEBUG(fmt, args...) \ |
do { \ |
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \ |
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \ |
} while (0) |
#define DRM_DEBUG_DRIVER(fmt, ...) \ |
#define DRM_DEBUG_DRIVER(fmt, args...) \ |
do { \ |
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \ |
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \ |
} while (0) |
#define DRM_DEBUG_KMS(fmt, ...) \ |
#define DRM_DEBUG_KMS(fmt, args...) \ |
do { \ |
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \ |
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \ |
} while (0) |
#define DRM_DEBUG_PRIME(fmt, ...) \ |
#define DRM_DEBUG_PRIME(fmt, args...) \ |
do { \ |
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \ |
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##args); \ |
} while (0) |
#define DRM_LOG(fmt, ...) \ |
do { \ |
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \ |
} while (0) |
#define DRM_LOG_KMS(fmt, ...) \ |
do { \ |
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \ |
} while (0) |
#define DRM_LOG_MODE(fmt, ...) \ |
do { \ |
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \ |
} while (0) |
#define DRM_LOG_DRIVER(fmt, ...) \ |
do { \ |
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \ |
} while (0) |
#else |
#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0) |
#define DRM_DEBUG_KMS(fmt, args...) do { } while (0) |
#define DRM_DEBUG_PRIME(fmt, args...) do { } while (0) |
#define DRM_DEBUG(fmt, arg...) do { } while (0) |
#define DRM_LOG(fmt, arg...) do { } while (0) |
#define DRM_LOG_KMS(fmt, args...) do { } while (0) |
#define DRM_LOG_MODE(fmt, arg...) do { } while (0) |
#define DRM_LOG_DRIVER(fmt, arg...) do { } while (0) |
#endif |
/*@}*/ |
310,7 → 286,6 |
} \ |
} while (0) |
#if 0 |
/** |
* Ioctl function type. |
* |
351,6 → 326,7 |
#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ |
[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl} |
#if 0 |
struct drm_magic_entry { |
struct list_head head; |
struct drm_hash_item hash_item; |
405,18 → 381,7 |
}; |
#endif |
struct drm_freelist { |
int initialized; /**< Freelist in use */ |
atomic_t count; /**< Number of free buffers */ |
struct drm_buf *next; /**< End pointer */ |
wait_queue_head_t waiting; /**< Processes waiting on free bufs */ |
int low_mark; /**< Low water mark */ |
int high_mark; /**< High water mark */ |
atomic_t wfh; /**< If waiting for high mark */ |
spinlock_t lock; |
}; |
typedef struct drm_dma_handle { |
dma_addr_t busaddr; |
void *vaddr; |
434,7 → 399,8 |
int page_order; |
struct drm_dma_handle **seglist; |
struct drm_freelist freelist; |
int low_mark; /**< Low water mark */ |
int high_mark; /**< High water mark */ |
}; |
/* Event queued up for userspace to read */ |
455,11 → 421,16 |
/** File private data */ |
struct drm_file { |
unsigned always_authenticated :1; |
unsigned authenticated :1; |
unsigned is_master :1; /* this file private is a master for a minor */ |
/* Whether we're master for a minor. Protected by master_mutex */ |
unsigned is_master :1; |
/* true when the client has asked us to expose stereo 3D mode flags */ |
unsigned stereo_allowed :1; |
/* |
* true if client understands CRTC primary planes and cursor planes |
* in the plane list |
*/ |
unsigned universal_planes:1; |
struct list_head lhead; |
unsigned long lock_count; |
470,7 → 441,16 |
void *driver_priv; |
struct drm_master *master; /* master this node is currently associated with |
N.B. not always minor->master */ |
/** |
* fbs - List of framebuffers associated with this file. |
* |
* Protected by fbs_lock. Note that the fbs list holds a reference on |
* the fb object to prevent it from untimely disappearing. |
*/ |
struct list_head fbs; |
struct mutex fbs_lock; |
wait_queue_head_t event_wait; |
struct list_head event_list; |
478,23 → 458,6 |
}; |
#if 0 |
/** Wait queue */ |
struct drm_queue { |
atomic_t use_count; /**< Outstanding uses (+1) */ |
atomic_t finalization; /**< Finalization in progress */ |
atomic_t block_count; /**< Count of processes waiting */ |
atomic_t block_read; /**< Queue blocked for reads */ |
wait_queue_head_t read_queue; /**< Processes waiting on block_read */ |
atomic_t block_write; /**< Queue blocked for writes */ |
wait_queue_head_t write_queue; /**< Processes waiting on block_write */ |
atomic_t total_queued; /**< Total queued statistic */ |
atomic_t total_flushed; /**< Total flushes statistic */ |
atomic_t total_locks; /**< Total locks statistics */ |
enum drm_ctx_flags flags; /**< Context preserving and 2D-only */ |
struct drm_waitlist waitlist; /**< Pending buffers */ |
wait_queue_head_t flush_queue; /**< Processes waiting until flush */ |
}; |
/** |
* Lock data. |
*/ |
578,7 → 541,6 |
#endif |
/** |
* Kernel side of a mapping |
*/ |
605,15 → 567,6 |
struct drm_master *master; |
}; |
/** |
* Context handle list |
*/ |
struct drm_ctx_list { |
struct list_head head; /**< list head */ |
drm_context_t handle; /**< context handle */ |
struct drm_file *tag; /**< associated fd private data */ |
}; |
/* location of GART table */ |
#define DRM_ATI_GART_MAIN 1 |
#define DRM_ATI_GART_FB 2 |
691,38 → 644,46 |
uint32_t pending_read_domains; |
uint32_t pending_write_domain; |
/** |
* dma_buf - dma buf associated with this GEM object |
* |
* Pointer to the dma-buf associated with this gem object (either |
* through importing or exporting). We break the resulting reference |
* loop when the last gem handle for this object is released. |
* |
* Protected by obj->object_name_lock |
*/ |
struct dma_buf *dma_buf; |
}; |
#include <drm/drm_crtc.h> |
/* per-master structure */ |
/** |
* struct drm_master - drm master structure |
* |
* @refcount: Refcount for this master object. |
* @minor: Link back to minor char device we are master for. Immutable. |
* @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex. |
* @unique_len: Length of unique field. Protected by drm_global_mutex. |
* @unique_size: Amount allocated. Protected by drm_global_mutex. |
* @magiclist: Hash of used authentication tokens. Protected by struct_mutex. |
* @magicfree: List of used authentication tokens. Protected by struct_mutex. |
* @lock: DRI lock information. |
* @driver_priv: Pointer to driver-private information. |
*/ |
struct drm_master { |
struct kref refcount; /* refcount for this master */ |
struct list_head head; /**< each minor contains a list of masters */ |
struct drm_minor *minor; /**< link back to minor we are a master for */ |
char *unique; /**< Unique identifier: e.g., busid */ |
int unique_len; /**< Length of unique field */ |
int unique_size; /**< amount allocated */ |
int blocked; /**< Blocked due to VC switch? */ |
/** \name Authentication */ |
/*@{ */ |
struct kref refcount; |
struct drm_minor *minor; |
char *unique; |
int unique_len; |
int unique_size; |
// struct drm_open_hash magiclist; |
// struct list_head magicfree; |
/*@} */ |
// struct drm_lock_data lock; /**< Information on hardware lock */ |
void *driver_priv; /**< Private structure for driver to use */ |
// struct drm_lock_data lock; |
void *driver_priv; |
}; |
#if 0 |
/* Size of ringbuffer for vblank timestamps. Just double-buffer |
* in initial implementation. |
*/ |
739,18 → 700,9 |
#define DRM_SCANOUTPOS_ACCURATE (1 << 2) |
struct drm_bus { |
int bus_type; |
int (*get_irq)(struct drm_device *dev); |
const char *(*get_name)(struct drm_device *dev); |
int (*set_busid)(struct drm_device *dev, struct drm_master *master); |
int (*set_unique)(struct drm_device *dev, struct drm_master *master, |
struct drm_unique *unique); |
int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p); |
}; |
#endif |
#define DRM_IRQ_ARGS int irq, void *arg |
/** |
* DRM driver structure. This structure represent the common code for |
* a family of cards. There will one drm_device for each card present |
876,7 → 828,7 |
/* these have to be filled in */ |
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); |
irqreturn_t(*irq_handler) (int irq, void *arg); |
void (*irq_preinstall) (struct drm_device *dev); |
int (*irq_postinstall) (struct drm_device *dev); |
void (*irq_uninstall) (struct drm_device *dev); |
891,12 → 843,15 |
int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); |
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); |
u32 driver_features; |
int dev_priv_size; |
}; |
#define DRM_MINOR_UNASSIGNED 0 |
#define DRM_MINOR_LEGACY 1 |
#define DRM_MINOR_CONTROL 2 |
#define DRM_MINOR_RENDER 3 |
enum drm_minor_type { |
DRM_MINOR_LEGACY, |
DRM_MINOR_CONTROL, |
DRM_MINOR_RENDER, |
DRM_MINOR_CNT, |
}; |
/** |
* Info file list entry. This structure represents a debugfs or proc file to |
925,33 → 880,17 |
struct drm_minor { |
int index; /**< Minor device number */ |
int type; /**< Control or render */ |
// dev_t device; /**< Device number for mknod */ |
// struct device kdev; /**< Linux device */ |
struct drm_device *dev; |
// struct proc_dir_entry *proc_root; /**< proc directory entry */ |
// struct drm_info_node proc_nodes; |
// struct dentry *debugfs_root; |
// struct drm_info_node debugfs_nodes; |
struct dentry *debugfs_root; |
struct drm_master *master; /* currently active master for this node */ |
// struct list_head master_list; |
// struct drm_mode_group mode_group; |
}; |
struct list_head debugfs_list; |
struct mutex debugfs_lock; /* Protects debugfs_list. */ |
/* mode specified on the command line */ |
struct drm_cmdline_mode { |
bool specified; |
bool refresh_specified; |
bool bpp_specified; |
int xres, yres; |
int bpp; |
int refresh; |
bool rb; |
bool interlace; |
bool cvt; |
bool margins; |
enum drm_connector_force force; |
/* currently active master for this node. Protected by master_mutex */ |
struct drm_master *master; |
struct drm_mode_group mode_group; |
}; |
962,18 → 901,23 |
*/ |
struct drm_device { |
struct list_head legacy_dev_list;/**< list of devices per driver for stealth attach cleanup */ |
char *devname; /**< For /proc/interrupts */ |
int if_version; /**< Highest interface version set */ |
struct device *dev; /**< Device structure of bus-device */ |
struct drm_driver *driver; /**< DRM driver managing the device */ |
void *dev_private; /**< DRM driver private data */ |
struct drm_minor *primary; /**< Primary node */ |
atomic_t unplugged; /**< Flag whether dev is dead */ |
/** \name Locks */ |
/*@{ */ |
spinlock_t count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ |
struct mutex struct_mutex; /**< For others */ |
struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */ |
/*@} */ |
/** \name Usage Counters */ |
/*@{ */ |
int open_count; /**< Outstanding files open */ |
int open_count; /**< Outstanding files open, protected by drm_global_mutex. */ |
spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. */ |
int buf_use; /**< Buffers in use -- cannot alloc */ |
atomic_t buf_alloc; /**< Buffer allocation in progress */ |
/*@} */ |
1003,6 → 947,8 |
/** \name Context support */ |
/*@{ */ |
bool irq_enabled; /**< True if irq handler is enabled */ |
int irq; |
__volatile__ long context_flag; /**< Context swapping flag */ |
int last_context; /**< Last current context */ |
/*@} */ |
1018,7 → 964,12 |
*/ |
bool vblank_disable_allowed; |
/* array of size num_crtcs */ |
struct drm_vblank_crtc *vblank; |
spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ |
spinlock_t vbl_lock; |
u32 max_vblank_count; /**< size of vblank counter register */ |
/** |
1031,21 → 982,10 |
// struct drm_agp_head *agp; /**< AGP data */ |
struct device *dev; /**< Device structure */ |
struct pci_dev *pdev; /**< PCI device structure */ |
int pci_vendor; /**< PCI vendor id */ |
int pci_device; /**< PCI device id */ |
unsigned int num_crtcs; /**< Number of CRTCs on this device */ |
void *dev_private; /**< device private data */ |
struct address_space *dev_mapping; |
// struct drm_sigdata sigdata; /**< For block_all_signals */ |
// sigset_t sigmask; |
struct drm_driver *driver; |
// struct drm_local_map *agp_buffer_map; |
// unsigned int agp_buffer_token; |
// struct drm_minor *control; /**< Control node for card */ |
struct drm_minor *primary; /**< render type primary screen head */ |
struct drm_mode_config mode_config; /**< Current mode config */ |
1056,8 → 996,6 |
struct drm_vma_offset_manager *vma_offset_manager; |
/*@} */ |
int switch_power_state; |
atomic_t unplugged; /* device has been unplugged or gone away */ |
}; |
#define DRM_SWITCH_POWER_ON 0 |
1071,11 → 1009,6 |
return ((dev->driver->driver_features & feature) ? 1 : 0); |
} |
static inline int drm_dev_to_irq(struct drm_device *dev) |
{ |
return dev->pdev->irq; |
} |
static inline void drm_device_set_unplugged(struct drm_device *dev) |
{ |
smp_wmb(); |
1089,10 → 1022,6 |
return ret; |
} |
static inline bool drm_modeset_is_locked(struct drm_device *dev) |
{ |
return mutex_is_locked(&dev->mode_config.mutex); |
} |
/******************************************************************/ |
/** \name Internal function definitions */ |
1104,11 → 1033,11 |
extern long drm_compat_ioctl(struct file *filp, |
unsigned int cmd, unsigned long arg); |
extern int drm_lastclose(struct drm_device *dev); |
extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags); |
/* Device support (drm_fops.h) */ |
extern struct mutex drm_global_mutex; |
extern int drm_open(struct inode *inode, struct file *filp); |
extern int drm_stub_open(struct inode *inode, struct file *filp); |
extern ssize_t drm_read(struct file *filp, char __user *buffer, |
size_t count, loff_t *offset); |
extern int drm_release(struct inode *inode, struct file *filp); |
1146,29 → 1075,6 |
extern int drm_noop(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* Context IOCTL support (drm_context.h) */ |
extern int drm_resctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_addctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_switchctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_newctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_rmctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_ctxbitmap_init(struct drm_device *dev); |
extern void drm_ctxbitmap_cleanup(struct drm_device *dev); |
extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); |
extern int drm_setsareactx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getsareactx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* Authentication IOCTL support (drm_auth.h) */ |
extern int drm_getmagic(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
1179,7 → 1085,7 |
/* Cache management (drm_cache.c) */ |
void drm_clflush_pages(struct page *pages[], unsigned long num_pages); |
void drm_clflush_sg(struct sg_table *st); |
void drm_clflush_virt_range(char *addr, unsigned long length); |
void drm_clflush_virt_range(void *addr, unsigned long length); |
/* Locking IOCTL support (drm_lock.h) */ |
extern int drm_lock(struct drm_device *dev, void *data, |
1232,7 → 1138,7 |
/* IRQ support (drm_irq.h) */ |
extern int drm_control(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_irq_install(struct drm_device *dev); |
extern int drm_irq_install(struct drm_device *dev, int irq); |
extern int drm_irq_uninstall(struct drm_device *dev); |
extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); |
1246,8 → 1152,14 |
extern bool drm_handle_vblank(struct drm_device *dev, int crtc); |
extern int drm_vblank_get(struct drm_device *dev, int crtc); |
extern void drm_vblank_put(struct drm_device *dev, int crtc); |
extern int drm_crtc_vblank_get(struct drm_crtc *crtc); |
extern void drm_crtc_vblank_put(struct drm_crtc *crtc); |
extern void drm_vblank_off(struct drm_device *dev, int crtc); |
extern void drm_vblank_on(struct drm_device *dev, int crtc); |
extern void drm_crtc_vblank_off(struct drm_crtc *crtc); |
extern void drm_crtc_vblank_on(struct drm_crtc *crtc); |
extern void drm_vblank_cleanup(struct drm_device *dev); |
extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, |
struct timeval *tvblank, unsigned flags); |
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, |
1259,21 → 1171,7 |
extern void drm_calc_timestamping_constants(struct drm_crtc *crtc, |
const struct drm_display_mode *mode); |
extern bool |
drm_mode_parse_command_line_for_connector(const char *mode_option, |
struct drm_connector *connector, |
struct drm_cmdline_mode *mode); |
extern struct drm_display_mode * |
drm_mode_create_from_cmdline_mode(struct drm_device *dev, |
struct drm_cmdline_mode *cmd); |
extern int drm_display_mode_from_videomode(const struct videomode *vm, |
struct drm_display_mode *dmode); |
extern int of_get_drm_display_mode(struct device_node *np, |
struct drm_display_mode *dmode, |
int index); |
/* Modesetting support */ |
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); |
extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); |
1294,7 → 1192,6 |
extern void drm_put_dev(struct drm_device *dev); |
extern void drm_unplug_dev(struct drm_device *dev); |
extern unsigned int drm_debug; |
extern unsigned int drm_rnodes; |
#if 0 |
extern unsigned int drm_vblank_offdelay; |
1302,12 → 1199,9 |
extern unsigned int drm_timestamp_monotonic; |
extern struct class *drm_class; |
extern struct dentry *drm_debugfs_root; |
extern struct idr drm_minors_idr; |
extern struct drm_local_map *drm_getsarea(struct drm_device *dev); |
#endif |
/* Debugfs support */ |
#if defined(CONFIG_DEBUG_FS) |
extern int drm_debugfs_init(struct drm_minor *minor, int minor_id, |
1318,6 → 1212,8 |
extern int drm_debugfs_remove_files(const struct drm_info_list *files, |
int count, struct drm_minor *minor); |
extern int drm_debugfs_cleanup(struct drm_minor *minor); |
extern int drm_debugfs_connector_add(struct drm_connector *connector); |
extern void drm_debugfs_connector_remove(struct drm_connector *connector); |
#else |
static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id, |
struct dentry *root) |
1342,6 → 1238,15 |
{ |
return 0; |
} |
static inline int drm_debugfs_connector_add(struct drm_connector *connector) |
{ |
return 0; |
} |
static inline void drm_debugfs_connector_remove(struct drm_connector *connector) |
{ |
} |
#endif |
/* Info file support */ |
1368,7 → 1273,6 |
struct drm_ati_pcigart_info * gart_info); |
extern int drm_ati_pcigart_cleanup(struct drm_device *dev, |
struct drm_ati_pcigart_info * gart_info); |
#endif |
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, |
size_t align); |
1380,9 → 1284,8 |
struct drm_sysfs_class; |
extern struct class *drm_sysfs_create(struct module *owner, char *name); |
extern void drm_sysfs_destroy(void); |
extern int drm_sysfs_device_add(struct drm_minor *minor); |
extern struct device *drm_sysfs_minor_alloc(struct drm_minor *minor); |
extern void drm_sysfs_hotplug_event(struct drm_device *dev); |
extern void drm_sysfs_device_remove(struct drm_minor *minor); |
extern int drm_sysfs_connector_add(struct drm_connector *connector); |
extern void drm_sysfs_connector_remove(struct drm_connector *connector); |
#endif |
1443,7 → 1346,7 |
int drm_gem_create_mmap_offset(struct drm_gem_object *obj); |
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size); |
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); |
struct page **drm_gem_get_pages(struct drm_gem_object *obj); |
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, |
bool dirty, bool accessed); |
1477,7 → 1380,7 |
{ |
} |
//#include <drm/drm_mem_util.h> |
#include <drm/drm_mem_util.h> |
extern int drm_fill_in_dev(struct drm_device *dev, |
const struct pci_device_id *ent, |
1486,10 → 1389,13 |
/*@}*/ |
#if 0 |
extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver); |
extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver); |
extern int drm_get_pci_dev(struct pci_dev *pdev, |
const struct pci_device_id *ent, |
struct drm_driver *driver); |
#endif |
#define DRM_PCIE_SPEED_25 1 |
#define DRM_PCIE_SPEED_50 2 |
/drivers/include/drm/drm_crtc.h |
---|
32,8 → 32,8 |
#include <linux/fb.h> |
#include <linux/hdmi.h> |
#include <drm/drm_mode.h> |
#include <drm/drm_fourcc.h> |
#include <drm/drm_modeset_lock.h> |
struct drm_device; |
struct drm_mode_set; |
41,6 → 41,7 |
struct drm_object_properties; |
struct drm_file; |
struct drm_clip_rect; |
struct device_node; |
#define DRM_MODE_OBJECT_CRTC 0xcccccccc |
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 |
51,6 → 52,7 |
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb |
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee |
#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd |
#define DRM_MODE_OBJECT_ANY 0 |
struct drm_mode_object { |
uint32_t id; |
65,130 → 67,31 |
uint64_t values[DRM_OBJECT_MAX_PROPERTY]; |
}; |
/* |
* Note on terminology: here, for brevity and convenience, we refer to connector |
* control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS, |
* DVI, etc. And 'screen' refers to the whole of the visible display, which |
* may span multiple monitors (and therefore multiple CRTC and connector |
* structures). |
*/ |
static inline int64_t U642I64(uint64_t val) |
{ |
return (int64_t)*((int64_t *)&val); |
} |
static inline uint64_t I642U64(int64_t val) |
{ |
return (uint64_t)*((uint64_t *)&val); |
} |
enum drm_mode_status { |
MODE_OK = 0, /* Mode OK */ |
MODE_HSYNC, /* hsync out of range */ |
MODE_VSYNC, /* vsync out of range */ |
MODE_H_ILLEGAL, /* mode has illegal horizontal timings */ |
MODE_V_ILLEGAL, /* mode has illegal horizontal timings */ |
MODE_BAD_WIDTH, /* requires an unsupported linepitch */ |
MODE_NOMODE, /* no mode with a matching name */ |
MODE_NO_INTERLACE, /* interlaced mode not supported */ |
MODE_NO_DBLESCAN, /* doublescan mode not supported */ |
MODE_NO_VSCAN, /* multiscan mode not supported */ |
MODE_MEM, /* insufficient video memory */ |
MODE_VIRTUAL_X, /* mode width too large for specified virtual size */ |
MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */ |
MODE_MEM_VIRT, /* insufficient video memory given virtual size */ |
MODE_NOCLOCK, /* no fixed clock available */ |
MODE_CLOCK_HIGH, /* clock required is too high */ |
MODE_CLOCK_LOW, /* clock required is too low */ |
MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */ |
MODE_BAD_HVALUE, /* horizontal timing was out of range */ |
MODE_BAD_VVALUE, /* vertical timing was out of range */ |
MODE_BAD_VSCAN, /* VScan value out of range */ |
MODE_HSYNC_NARROW, /* horizontal sync too narrow */ |
MODE_HSYNC_WIDE, /* horizontal sync too wide */ |
MODE_HBLANK_NARROW, /* horizontal blanking too narrow */ |
MODE_HBLANK_WIDE, /* horizontal blanking too wide */ |
MODE_VSYNC_NARROW, /* vertical sync too narrow */ |
MODE_VSYNC_WIDE, /* vertical sync too wide */ |
MODE_VBLANK_NARROW, /* vertical blanking too narrow */ |
MODE_VBLANK_WIDE, /* vertical blanking too wide */ |
MODE_PANEL, /* exceeds panel dimensions */ |
MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */ |
MODE_ONE_WIDTH, /* only one width is supported */ |
MODE_ONE_HEIGHT, /* only one height is supported */ |
MODE_ONE_SIZE, /* only one resolution is supported */ |
MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ |
MODE_NO_STEREO, /* stereo modes not supported */ |
MODE_UNVERIFIED = -3, /* mode needs to reverified */ |
MODE_BAD = -2, /* unspecified reason */ |
MODE_ERROR = -1 /* error condition */ |
}; |
/* rotation property bits */ |
#define DRM_ROTATE_0 0 |
#define DRM_ROTATE_90 1 |
#define DRM_ROTATE_180 2 |
#define DRM_ROTATE_270 3 |
#define DRM_REFLECT_X 4 |
#define DRM_REFLECT_Y 5 |
#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \ |
DRM_MODE_TYPE_CRTC_C) |
#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ |
.name = nm, .status = 0, .type = (t), .clock = (c), \ |
.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ |
.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ |
.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ |
.vscan = (vs), .flags = (f), \ |
.base.type = DRM_MODE_OBJECT_MODE |
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */ |
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */ |
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF |
struct drm_display_mode { |
/* Header */ |
struct list_head head; |
struct drm_mode_object base; |
char name[DRM_DISPLAY_MODE_LEN]; |
enum drm_mode_status status; |
unsigned int type; |
/* Proposed mode values */ |
int clock; /* in kHz */ |
int hdisplay; |
int hsync_start; |
int hsync_end; |
int htotal; |
int hskew; |
int vdisplay; |
int vsync_start; |
int vsync_end; |
int vtotal; |
int vscan; |
unsigned int flags; |
/* Addressable image size (may be 0 for projectors, etc.) */ |
int width_mm; |
int height_mm; |
/* Actual mode we give to hw */ |
int crtc_clock; /* in KHz */ |
int crtc_hdisplay; |
int crtc_hblank_start; |
int crtc_hblank_end; |
int crtc_hsync_start; |
int crtc_hsync_end; |
int crtc_htotal; |
int crtc_hskew; |
int crtc_vdisplay; |
int crtc_vblank_start; |
int crtc_vblank_end; |
int crtc_vsync_start; |
int crtc_vsync_end; |
int crtc_vtotal; |
/* Driver private mode info */ |
int private_size; |
int *private; |
int private_flags; |
int vrefresh; /* in Hz */ |
int hsync; /* in kHz */ |
enum hdmi_picture_aspect picture_aspect_ratio; |
enum drm_connector_force { |
DRM_FORCE_UNSPECIFIED, |
DRM_FORCE_OFF, |
DRM_FORCE_ON, /* force on analog part normally */ |
DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ |
}; |
static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode) |
{ |
return mode->flags & DRM_MODE_FLAG_3D_MASK; |
} |
#include <drm/drm_modes.h> |
enum drm_connector_status { |
connector_status_connected = 1, |
227,6 → 130,9 |
enum subpixel_order subpixel_order; |
u32 color_formats; |
/* Mask of supported hdmi deep color modes */ |
u8 edid_hdmi_dc_modes; |
u8 cea_rev; |
}; |
307,10 → 213,15 |
char name[DRM_PROP_NAME_LEN]; |
uint32_t num_values; |
uint64_t *values; |
struct drm_device *dev; |
struct list_head enum_blob_list; |
}; |
void drm_modeset_lock_all(struct drm_device *dev); |
void drm_modeset_unlock_all(struct drm_device *dev); |
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); |
struct drm_crtc; |
struct drm_connector; |
struct drm_encoder; |
386,7 → 297,10 |
* drm_crtc - central CRTC control structure |
* @dev: parent DRM device |
* @head: list management |
* @mutex: per-CRTC locking |
* @base: base KMS object for ID tracking etc. |
* @primary: primary plane for this CRTC |
* @cursor: cursor plane for this CRTC |
* @enabled: is this CRTC enabled? |
* @mode: current mode timings |
* @hwmode: mode timings as programmed to hw regs |
409,6 → 323,7 |
*/ |
struct drm_crtc { |
struct drm_device *dev; |
struct device_node *port; |
struct list_head head; |
/** |
418,13 → 333,18 |
* state, ...) and a write lock for everything which can be update |
* without a full modeset (fb, cursor data, ...) |
*/ |
struct mutex mutex; |
struct drm_modeset_lock mutex; |
struct drm_mode_object base; |
/* framebuffer the connector is currently bound to */ |
struct drm_framebuffer *fb; |
/* primary and cursor planes for CRTC */ |
struct drm_plane *primary; |
struct drm_plane *cursor; |
/* position of cursor plane on crtc */ |
int cursor_x; |
int cursor_y; |
/* Temporary tracking of the old fb while a modeset is ongoing. Used |
* by drm_mode_set_config_internal to implement correct refcounting. */ |
struct drm_framebuffer *old_fb; |
514,6 → 434,7 |
* @dev: parent DRM device |
* @head: list management |
* @base: base KMS object |
* @name: encoder name |
* @encoder_type: one of the %DRM_MODE_ENCODER_<foo> types in drm_mode.h |
* @possible_crtcs: bitmask of potential CRTC bindings |
* @possible_clones: bitmask of potential sibling encoders for cloning |
530,6 → 451,7 |
struct list_head head; |
struct drm_mode_object base; |
char *name; |
int encoder_type; |
uint32_t possible_crtcs; |
uint32_t possible_clones; |
540,13 → 462,6 |
void *helper_private; |
}; |
enum drm_connector_force { |
DRM_FORCE_UNSPECIFIED, |
DRM_FORCE_OFF, |
DRM_FORCE_ON, /* force on analog part normally */ |
DRM_FORCE_ON_DIGITAL, /* for DVI-I use digital connector */ |
}; |
/* should we poll this connector for connects and disconnects */ |
/* hot plug detectable */ |
#define DRM_CONNECTOR_POLL_HPD (1 << 0) |
565,6 → 480,7 |
* @attr: sysfs attributes |
* @head: list management |
* @base: base KMS object |
* @name: connector name |
* @connector_type: one of the %DRM_MODE_CONNECTOR_<foo> types from drm_mode.h |
* @connector_type_id: index into connector type enum |
* @interlace_allowed: can this connector handle interlaced modes? |
603,6 → 519,7 |
struct drm_mode_object base; |
char *name; |
int connector_type; |
int connector_type_id; |
bool interlace_allowed; |
621,6 → 538,8 |
struct drm_property_blob *edid_blob_ptr; |
struct drm_object_properties properties; |
struct drm_property_blob *path_blob_ptr; |
uint8_t polled; /* DRM_CONNECTOR_POLL_* */ |
/* requested DPMS state */ |
630,6 → 549,7 |
/* forced on connector */ |
enum drm_connector_force force; |
bool override_edid; |
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; |
struct drm_encoder *encoder; /* currently active encoder */ |
642,6 → 562,8 |
int audio_latency[2]; |
int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */ |
unsigned bad_edid_counter; |
struct dentry *debugfs_entry; |
}; |
/** |
665,6 → 587,12 |
struct drm_property *property, uint64_t val); |
}; |
enum drm_plane_type { |
DRM_PLANE_TYPE_OVERLAY, |
DRM_PLANE_TYPE_PRIMARY, |
DRM_PLANE_TYPE_CURSOR, |
}; |
/** |
* drm_plane - central DRM plane control structure |
* @dev: DRM device this plane belongs to |
677,6 → 605,7 |
* @fb: currently bound fb |
* @funcs: helper functions |
* @properties: property tracking for this plane |
* @type: type of plane (overlay, primary, cursor) |
*/ |
struct drm_plane { |
struct drm_device *dev; |
694,6 → 623,8 |
const struct drm_plane_funcs *funcs; |
struct drm_object_properties properties; |
enum drm_plane_type type; |
}; |
/** |
835,6 → 766,8 |
*/ |
struct drm_mode_config { |
struct mutex mutex; /* protects configuration (mode lists etc.) */ |
struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */ |
struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */ |
struct mutex idr_mutex; /* for IDR management */ |
struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ |
/* this is limited to one for now */ |
856,7 → 789,15 |
struct list_head bridge_list; |
int num_encoder; |
struct list_head encoder_list; |
int num_plane; |
/* |
* Track # of overlay planes separately from # of total planes. By |
* default we only advertise overlay planes to userspace; if userspace |
* sets the "universal plane" capability bit, we'll go ahead and |
* expose all planes. |
*/ |
int num_overlay_plane; |
int num_total_plane; |
struct list_head plane_list; |
int num_crtc; |
878,6 → 819,8 |
struct list_head property_blob_list; |
struct drm_property *edid_property; |
struct drm_property *dpms_property; |
struct drm_property *path_property; |
struct drm_property *plane_type_property; |
/* DVI-I properties */ |
struct drm_property *dvi_i_subconnector_property; |
900,6 → 843,7 |
/* Optional properties */ |
struct drm_property *scaling_mode_property; |
struct drm_property *aspect_ratio_property; |
struct drm_property *dirty_info_property; |
/* dumb ioctl parameters */ |
907,6 → 851,9 |
/* whether async page flip is supported or not */ |
bool async_page_flip; |
/* cursor size */ |
uint32_t cursor_width, cursor_height; |
}; |
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) |
923,10 → 870,11 |
char *name; |
}; |
extern void drm_modeset_lock_all(struct drm_device *dev); |
extern void drm_modeset_unlock_all(struct drm_device *dev); |
extern void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); |
extern int drm_crtc_init_with_planes(struct drm_device *dev, |
struct drm_crtc *crtc, |
struct drm_plane *primary, |
struct drm_plane *cursor, |
const struct drm_crtc_funcs *funcs); |
extern int drm_crtc_init(struct drm_device *dev, |
struct drm_crtc *crtc, |
const struct drm_crtc_funcs *funcs); |
951,6 → 899,8 |
struct drm_connector *connector, |
const struct drm_connector_funcs *funcs, |
int connector_type); |
int drm_connector_register(struct drm_connector *connector); |
void drm_connector_unregister(struct drm_connector *connector); |
extern void drm_connector_cleanup(struct drm_connector *connector); |
/* helper to unplug all connectors from sysfs for device */ |
978,19 → 928,30 |
return !!(encoder->possible_crtcs & drm_crtc_mask(crtc)); |
} |
extern int drm_universal_plane_init(struct drm_device *dev, |
struct drm_plane *plane, |
unsigned long possible_crtcs, |
const struct drm_plane_funcs *funcs, |
const uint32_t *formats, |
uint32_t format_count, |
enum drm_plane_type type); |
extern int drm_plane_init(struct drm_device *dev, |
struct drm_plane *plane, |
unsigned long possible_crtcs, |
const struct drm_plane_funcs *funcs, |
const uint32_t *formats, uint32_t format_count, |
bool priv); |
bool is_primary); |
extern void drm_plane_cleanup(struct drm_plane *plane); |
extern void drm_plane_force_disable(struct drm_plane *plane); |
extern int drm_crtc_check_viewport(const struct drm_crtc *crtc, |
int x, int y, |
const struct drm_display_mode *mode, |
const struct drm_framebuffer *fb); |
extern void drm_encoder_cleanup(struct drm_encoder *encoder); |
extern const char *drm_get_connector_name(const struct drm_connector *connector); |
extern const char *drm_get_connector_status_name(enum drm_connector_status status); |
extern const char *drm_get_subpixel_order_name(enum subpixel_order order); |
extern const char *drm_get_dpms_name(int val); |
extern const char *drm_get_dvi_i_subconnector_name(int val); |
extern const char *drm_get_dvi_i_select_name(int val); |
998,41 → 959,38 |
extern const char *drm_get_tv_select_name(int val); |
extern void drm_fb_release(struct drm_file *file_priv); |
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); |
extern void drm_mode_group_destroy(struct drm_mode_group *group); |
extern void drm_reinit_primary_mode_group(struct drm_device *dev); |
extern bool drm_probe_ddc(struct i2c_adapter *adapter); |
extern struct edid *drm_get_edid(struct drm_connector *connector, |
struct i2c_adapter *adapter); |
extern struct edid *drm_edid_duplicate(const struct edid *edid); |
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); |
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); |
extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src); |
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, |
const struct drm_display_mode *mode); |
extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); |
extern void drm_mode_config_init(struct drm_device *dev); |
extern void drm_mode_config_reset(struct drm_device *dev); |
extern void drm_mode_config_cleanup(struct drm_device *dev); |
extern void drm_mode_set_name(struct drm_display_mode *mode); |
extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); |
extern bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2); |
extern int drm_mode_width(const struct drm_display_mode *mode); |
extern int drm_mode_height(const struct drm_display_mode *mode); |
/* for us by fb module */ |
extern struct drm_display_mode *drm_mode_create(struct drm_device *dev); |
extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); |
extern void drm_mode_validate_size(struct drm_device *dev, |
struct list_head *mode_list, |
int maxX, int maxY, int maxPitch); |
extern void drm_mode_prune_invalid(struct drm_device *dev, |
struct list_head *mode_list, bool verbose); |
extern void drm_mode_sort(struct list_head *mode_list); |
extern int drm_mode_hsync(const struct drm_display_mode *mode); |
extern int drm_mode_vrefresh(const struct drm_display_mode *mode); |
extern void drm_mode_set_crtcinfo(struct drm_display_mode *p, |
int adjust_flags); |
extern void drm_mode_connector_list_update(struct drm_connector *connector); |
extern int drm_mode_connector_set_path_property(struct drm_connector *connector, |
char *path); |
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, |
struct edid *edid); |
static inline bool drm_property_type_is(struct drm_property *property, |
uint32_t type) |
{ |
/* instanceof for props.. handles extended type vs original types: */ |
if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) |
return (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) == type; |
return property->flags & type; |
} |
static inline bool drm_property_type_valid(struct drm_property *property) |
{ |
if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE) |
return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE); |
return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE); |
} |
extern int drm_object_property_set_value(struct drm_mode_object *obj, |
struct drm_property *property, |
uint64_t val); |
1062,10 → 1020,16 |
struct drm_property *drm_property_create_bitmask(struct drm_device *dev, |
int flags, const char *name, |
const struct drm_prop_enum_list *props, |
int num_values); |
int num_props, |
uint64_t supported_bits); |
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags, |
const char *name, |
uint64_t min, uint64_t max); |
struct drm_property *drm_property_create_signed_range(struct drm_device *dev, |
int flags, const char *name, |
int64_t min, int64_t max); |
struct drm_property *drm_property_create_object(struct drm_device *dev, |
int flags, const char *name, uint32_t type); |
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property); |
extern int drm_property_add_enum(struct drm_property *property, int index, |
uint64_t value, const char *name); |
1073,17 → 1037,16 |
extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, |
char *formats[]); |
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); |
extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev); |
extern int drm_mode_create_dirty_info_property(struct drm_device *dev); |
extern const char *drm_get_encoder_name(const struct drm_encoder *encoder); |
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, |
struct drm_encoder *encoder); |
extern void drm_mode_connector_detach_encoder(struct drm_connector *connector, |
struct drm_encoder *encoder); |
extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, |
int gamma_size); |
extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, |
uint32_t id, uint32_t type); |
/* IOCTLs */ |
extern int drm_mode_getresources(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
1129,21 → 1092,12 |
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match); |
extern enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code); |
extern bool drm_detect_hdmi_monitor(struct edid *edid); |
extern bool drm_detect_monitor_audio(struct edid *edid); |
extern bool drm_rgb_quant_range_selectable(struct edid *edid); |
extern int drm_mode_page_flip_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, |
int hdisplay, int vdisplay, int vrefresh, |
bool reduced, bool interlaced, bool margins); |
extern struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, |
int hdisplay, int vdisplay, int vrefresh, |
bool interlaced, int margins); |
extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev, |
int hdisplay, int vdisplay, int vrefresh, |
bool interlaced, int margins, int GTF_M, |
int GTF_2C, int GTF_K, int GTF_2J); |
extern int drm_add_modes_noedid(struct drm_connector *connector, |
int hdisplay, int vdisplay); |
extern void drm_set_preferred_mode(struct drm_connector *connector, |
1174,8 → 1128,21 |
extern int drm_format_horz_chroma_subsampling(uint32_t format); |
extern int drm_format_vert_chroma_subsampling(uint32_t format); |
extern const char *drm_get_format_name(uint32_t format); |
extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev, |
unsigned int supported_rotations); |
extern unsigned int drm_rotation_simplify(unsigned int rotation, |
unsigned int supported_rotations); |
/* Helpers */ |
static inline struct drm_plane *drm_plane_find(struct drm_device *dev, |
uint32_t id) |
{ |
struct drm_mode_object *mo; |
mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PLANE); |
return mo ? obj_to_plane(mo) : NULL; |
} |
static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, |
uint32_t id) |
{ |
1192,4 → 1159,33 |
return mo ? obj_to_encoder(mo) : NULL; |
} |
static inline struct drm_connector *drm_connector_find(struct drm_device *dev, |
uint32_t id) |
{ |
struct drm_mode_object *mo; |
mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CONNECTOR); |
return mo ? obj_to_connector(mo) : NULL; |
} |
static inline struct drm_property *drm_property_find(struct drm_device *dev, |
uint32_t id) |
{ |
struct drm_mode_object *mo; |
mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_PROPERTY); |
return mo ? obj_to_property(mo) : NULL; |
} |
static inline struct drm_property_blob * |
drm_property_blob_find(struct drm_device *dev, uint32_t id) |
{ |
struct drm_mode_object *mo; |
mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_BLOB); |
return mo ? obj_to_blob(mo) : NULL; |
} |
/* Plane list iterator for legacy (overlay only) planes. */ |
#define drm_for_each_legacy_plane(plane, planelist) \ |
list_for_each_entry(plane, planelist, head) \ |
if (plane->type == DRM_PLANE_TYPE_OVERLAY) |
#endif /* __DRM_CRTC_H__ */ |
/drivers/include/drm/drm_crtc_helper.h |
---|
114,7 → 114,7 |
/** |
* drm_connector_helper_funcs - helper operations for connectors |
* @get_modes: get mode list for this connector |
* @mode_valid: is this mode valid on the given connector? |
* @mode_valid (optional): is this mode valid on the given connector? |
* |
* The helper operations are called by the mid-layer CRTC helper. |
*/ |
125,7 → 125,6 |
struct drm_encoder *(*best_encoder)(struct drm_connector *connector); |
}; |
extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY); |
extern void drm_helper_disable_unused_functions(struct drm_device *dev); |
extern int drm_crtc_helper_set_config(struct drm_mode_set *set); |
extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, |
139,7 → 138,7 |
extern void drm_helper_move_panel_connectors_to_head(struct drm_device *); |
extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, |
extern void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, |
struct drm_mode_fb_cmd2 *mode_cmd); |
static inline void drm_crtc_helper_add(struct drm_crtc *crtc, |
160,7 → 159,16 |
connector->helper_private = (void *)funcs; |
} |
extern int drm_helper_resume_force_mode(struct drm_device *dev); |
extern void drm_helper_resume_force_mode(struct drm_device *dev); |
/* drm_probe_helper.c */ |
extern int drm_helper_probe_single_connector_modes(struct drm_connector |
*connector, uint32_t maxX, |
uint32_t maxY); |
extern int drm_helper_probe_single_connector_modes_nomerge(struct drm_connector |
*connector, |
uint32_t maxX, |
uint32_t maxY); |
extern void drm_kms_helper_poll_init(struct drm_device *dev); |
extern void drm_kms_helper_poll_fini(struct drm_device *dev); |
extern bool drm_helper_hpd_irq_event(struct drm_device *dev); |
/drivers/include/drm/drm_dp_helper.h |
---|
37,6 → 37,7 |
* eDP: Embedded DisplayPort version 1 |
* DPI: DisplayPort Interoperability Guideline v1.1a |
* 1.2: DisplayPort 1.2 |
* MST: Multistream Transport - part of DP 1.2a |
* |
* 1.2 formally includes both eDP and DPI definitions. |
*/ |
103,9 → 104,14 |
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */ |
/* Multiple stream transport */ |
#define DP_FAUX_CAP 0x020 /* 1.2 */ |
# define DP_FAUX_CAP_1 (1 << 0) |
#define DP_MSTM_CAP 0x021 /* 1.2 */ |
# define DP_MST_CAP (1 << 0) |
#define DP_GUID 0x030 /* 1.2 */ |
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */ |
# define DP_PSR_IS_SUPPORTED 1 |
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */ |
221,6 → 227,16 |
# define DP_PSR_CRC_VERIFICATION (1 << 2) |
# define DP_PSR_FRAME_CAPTURE (1 << 3) |
#define DP_ADAPTER_CTRL 0x1a0 |
# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) |
#define DP_BRANCH_DEVICE_CTRL 0x1a1 |
# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0) |
#define DP_PAYLOAD_ALLOCATE_SET 0x1c0 |
#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1 |
#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2 |
#define DP_SINK_COUNT 0x200 |
/* prior to 1.2 bit 7 was reserved mbz */ |
# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f)) |
230,6 → 246,9 |
# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0) |
# define DP_AUTOMATED_TEST_REQUEST (1 << 1) |
# define DP_CP_IRQ (1 << 2) |
# define DP_MCCS_IRQ (1 << 3) |
# define DP_DOWN_REP_MSG_RDY (1 << 4) /* 1.2 MST */ |
# define DP_UP_REQ_MSG_RDY (1 << 5) /* 1.2 MST */ |
# define DP_SINK_SPECIFIC_IRQ (1 << 6) |
#define DP_LANE0_1_STATUS 0x202 |
279,11 → 298,30 |
#define DP_TEST_PATTERN 0x221 |
#define DP_TEST_CRC_R_CR 0x240 |
#define DP_TEST_CRC_G_Y 0x242 |
#define DP_TEST_CRC_B_CB 0x244 |
#define DP_TEST_SINK_MISC 0x246 |
#define DP_TEST_CRC_SUPPORTED (1 << 5) |
#define DP_TEST_RESPONSE 0x260 |
# define DP_TEST_ACK (1 << 0) |
# define DP_TEST_NAK (1 << 1) |
# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2) |
#define DP_TEST_EDID_CHECKSUM 0x261 |
#define DP_TEST_SINK 0x270 |
#define DP_TEST_SINK_START (1 << 0) |
#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */ |
# define DP_PAYLOAD_TABLE_UPDATED (1 << 0) |
# define DP_PAYLOAD_ACT_HANDLED (1 << 1) |
#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */ |
/* up to ID_SLOT_63 at 0x2ff */ |
#define DP_SOURCE_OUI 0x300 |
#define DP_SINK_OUI 0x400 |
#define DP_BRANCH_OUI 0x500 |
291,7 → 329,23 |
#define DP_SET_POWER 0x600 |
# define DP_SET_POWER_D0 0x1 |
# define DP_SET_POWER_D3 0x2 |
# define DP_SET_POWER_MASK 0x3 |
#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */ |
#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */ |
#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */ |
#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */ |
#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */ |
/* 0-5 sink count */ |
# define DP_SINK_COUNT_CP_READY (1 << 6) |
#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */ |
#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */ |
#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */ |
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */ |
# define DP_PSR_LINK_CRC_ERROR (1 << 0) |
# define DP_PSR_RFB_STORAGE_ERROR (1 << 1) |
308,6 → 362,43 |
# define DP_PSR_SINK_INTERNAL_ERROR 7 |
# define DP_PSR_SINK_STATE_MASK 0x07 |
/* DP 1.2 Sideband message defines */ |
/* peer device type - DP 1.2a Table 2-92 */ |
#define DP_PEER_DEVICE_NONE 0x0 |
#define DP_PEER_DEVICE_SOURCE_OR_SST 0x1 |
#define DP_PEER_DEVICE_MST_BRANCHING 0x2 |
#define DP_PEER_DEVICE_SST_SINK 0x3 |
#define DP_PEER_DEVICE_DP_LEGACY_CONV 0x4 |
/* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */ |
#define DP_LINK_ADDRESS 0x01 |
#define DP_CONNECTION_STATUS_NOTIFY 0x02 |
#define DP_ENUM_PATH_RESOURCES 0x10 |
#define DP_ALLOCATE_PAYLOAD 0x11 |
#define DP_QUERY_PAYLOAD 0x12 |
#define DP_RESOURCE_STATUS_NOTIFY 0x13 |
#define DP_CLEAR_PAYLOAD_ID_TABLE 0x14 |
#define DP_REMOTE_DPCD_READ 0x20 |
#define DP_REMOTE_DPCD_WRITE 0x21 |
#define DP_REMOTE_I2C_READ 0x22 |
#define DP_REMOTE_I2C_WRITE 0x23 |
#define DP_POWER_UP_PHY 0x24 |
#define DP_POWER_DOWN_PHY 0x25 |
#define DP_SINK_EVENT_NOTIFY 0x30 |
#define DP_QUERY_STREAM_ENC_STATUS 0x38 |
/* DP 1.2 MST sideband nak reasons - table 2.84 */ |
#define DP_NAK_WRITE_FAILURE 0x01 |
#define DP_NAK_INVALID_READ 0x02 |
#define DP_NAK_CRC_FAILURE 0x03 |
#define DP_NAK_BAD_PARAM 0x04 |
#define DP_NAK_DEFER 0x05 |
#define DP_NAK_LINK_FAILURE 0x06 |
#define DP_NAK_NO_RESOURCES 0x07 |
#define DP_NAK_DPCD_FAIL 0x08 |
#define DP_NAK_I2C_NAK 0x09 |
#define DP_NAK_ALLOCATE_FAIL 0x0a |
#define MODE_I2C_START 1 |
#define MODE_I2C_WRITE 2 |
#define MODE_I2C_READ 4 |
398,4 → 489,124 |
(dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP); |
} |
/* |
* DisplayPort AUX channel |
*/ |
/** |
* struct drm_dp_aux_msg - DisplayPort AUX channel transaction |
* @address: address of the (first) register to access |
* @request: contains the type of transaction (see DP_AUX_* macros) |
* @reply: upon completion, contains the reply type of the transaction |
* @buffer: pointer to a transmission or reception buffer |
* @size: size of @buffer |
*/ |
struct drm_dp_aux_msg { |
unsigned int address; |
u8 request; |
u8 reply; |
void *buffer; |
size_t size; |
}; |
/** |
* struct drm_dp_aux - DisplayPort AUX channel |
* @name: user-visible name of this AUX channel and the I2C-over-AUX adapter |
* @ddc: I2C adapter that can be used for I2C-over-AUX communication |
* @dev: pointer to struct device that is the parent for this AUX channel |
* @hw_mutex: internal mutex used for locking transfers |
* @transfer: transfers a message representing a single AUX transaction |
* |
* The .dev field should be set to a pointer to the device that implements |
* the AUX channel. |
* |
* The .name field may be used to specify the name of the I2C adapter. If set to |
* NULL, dev_name() of .dev will be used. |
* |
* Drivers provide a hardware-specific implementation of how transactions |
* are executed via the .transfer() function. A pointer to a drm_dp_aux_msg |
* structure describing the transaction is passed into this function. Upon |
* success, the implementation should return the number of payload bytes |
* that were transferred, or a negative error-code on failure. Helpers |
* propagate errors from the .transfer() function, with the exception of |
* the -EBUSY error, which causes a transaction to be retried. On a short, |
* helpers will return -EPROTO to make it simpler to check for failure. |
* |
* An AUX channel can also be used to transport I2C messages to a sink. A |
* typical application of that is to access an EDID that's present in the |
* sink device. The .transfer() function can also be used to execute such |
* transactions. The drm_dp_aux_register_i2c_bus() function registers an |
* I2C adapter that can be passed to drm_probe_ddc(). Upon removal, drivers |
* should call drm_dp_aux_unregister_i2c_bus() to remove the I2C adapter. |
* |
* Note that the aux helper code assumes that the .transfer() function |
* only modifies the reply field of the drm_dp_aux_msg structure. The |
* retry logic and i2c helpers assume this is the case. |
*/ |
struct drm_dp_aux { |
const char *name; |
struct i2c_adapter ddc; |
struct device *dev; |
struct mutex hw_mutex; |
ssize_t (*transfer)(struct drm_dp_aux *aux, |
struct drm_dp_aux_msg *msg); |
}; |
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, |
void *buffer, size_t size); |
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, |
void *buffer, size_t size); |
/** |
* drm_dp_dpcd_readb() - read a single byte from the DPCD |
* @aux: DisplayPort AUX channel |
* @offset: address of the register to read |
* @valuep: location where the value of the register will be stored |
* |
* Returns the number of bytes transferred (1) on success, or a negative |
* error code on failure. |
*/ |
static inline ssize_t drm_dp_dpcd_readb(struct drm_dp_aux *aux, |
unsigned int offset, u8 *valuep) |
{ |
return drm_dp_dpcd_read(aux, offset, valuep, 1); |
} |
/** |
* drm_dp_dpcd_writeb() - write a single byte to the DPCD |
* @aux: DisplayPort AUX channel |
* @offset: address of the register to write |
* @value: value to write to the register |
* |
* Returns the number of bytes transferred (1) on success, or a negative |
* error code on failure. |
*/ |
static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux, |
unsigned int offset, u8 value) |
{ |
return drm_dp_dpcd_write(aux, offset, &value, 1); |
} |
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, |
u8 status[DP_LINK_STATUS_SIZE]); |
/* |
* DisplayPort link |
*/ |
#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0) |
struct drm_dp_link { |
unsigned char revision; |
unsigned int rate; |
unsigned int num_lanes; |
unsigned long capabilities; |
}; |
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link); |
int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link); |
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link); |
int drm_dp_aux_register(struct drm_dp_aux *aux); |
void drm_dp_aux_unregister(struct drm_dp_aux *aux); |
#endif /* _DRM_DP_HELPER_H_ */ |
/drivers/include/drm/drm_dp_mst_helper.h |
---|
0,0 → 1,509 |
/* |
* Copyright © 2014 Red Hat. |
* |
* Permission to use, copy, modify, distribute, and sell this software and its |
* documentation for any purpose is hereby granted without fee, provided that |
* the above copyright notice appear in all copies and that both that copyright |
* notice and this permission notice appear in supporting documentation, and |
* that the name of the copyright holders not be used in advertising or |
* publicity pertaining to distribution of the software without specific, |
* written prior permission. The copyright holders make no representations |
* about the suitability of this software for any purpose. It is provided "as |
* is" without express or implied warranty. |
* |
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, |
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO |
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR |
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, |
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER |
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE |
* OF THIS SOFTWARE. |
*/ |
#ifndef _DRM_DP_MST_HELPER_H_ |
#define _DRM_DP_MST_HELPER_H_ |
#include <linux/types.h> |
#include <drm/drm_dp_helper.h> |
struct drm_dp_mst_branch; |
/** |
* struct drm_dp_vcpi - Virtual Channel Payload Identifer |
* @vcpi: Virtual channel ID. |
* @pbn: Payload Bandwidth Number for this channel |
* @aligned_pbn: PBN aligned with slot size |
* @num_slots: number of slots for this PBN |
*/ |
struct drm_dp_vcpi { |
int vcpi; |
int pbn; |
int aligned_pbn; |
int num_slots; |
}; |
/** |
* struct drm_dp_mst_port - MST port |
* @kref: reference count for this port. |
* @guid_valid: for DP 1.2 devices if we have validated the GUID. |
* @guid: guid for DP 1.2 device on this port. |
* @port_num: port number |
* @input: if this port is an input port. |
* @mcs: message capability status - DP 1.2 spec. |
* @ddps: DisplayPort Device Plug Status - DP 1.2 |
* @pdt: Peer Device Type |
* @ldps: Legacy Device Plug Status |
* @dpcd_rev: DPCD revision of device on this port |
* @num_sdp_streams: Number of simultaneous streams |
* @num_sdp_stream_sinks: Number of stream sinks |
* @available_pbn: Available bandwidth for this port. |
* @next: link to next port on this branch device |
* @mstb: branch device attach below this port |
* @aux: i2c aux transport to talk to device connected to this port. |
* @parent: branch device parent of this port |
* @vcpi: Virtual Channel Payload info for this port. |
* @connector: DRM connector this port is connected to. |
* @mgr: topology manager this port lives under. |
* |
* This structure represents an MST port endpoint on a device somewhere |
* in the MST topology. |
*/ |
struct drm_dp_mst_port { |
struct kref kref; |
/* if dpcd 1.2 device is on this port - its GUID info */ |
bool guid_valid; |
u8 guid[16]; |
u8 port_num; |
bool input; |
bool mcs; |
bool ddps; |
u8 pdt; |
bool ldps; |
u8 dpcd_rev; |
u8 num_sdp_streams; |
u8 num_sdp_stream_sinks; |
uint16_t available_pbn; |
struct list_head next; |
struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */ |
struct drm_dp_aux aux; /* i2c bus for this port? */ |
struct drm_dp_mst_branch *parent; |
struct drm_dp_vcpi vcpi; |
struct drm_connector *connector; |
struct drm_dp_mst_topology_mgr *mgr; |
}; |
/** |
* struct drm_dp_mst_branch - MST branch device. |
* @kref: reference count for this port. |
* @rad: Relative Address to talk to this branch device. |
* @lct: Link count total to talk to this branch device. |
* @num_ports: number of ports on the branch. |
* @msg_slots: one bit per transmitted msg slot. |
* @ports: linked list of ports on this branch. |
* @port_parent: pointer to the port parent, NULL if toplevel. |
* @mgr: topology manager for this branch device. |
* @tx_slots: transmission slots for this device. |
* @last_seqno: last sequence number used to talk to this. |
* @link_address_sent: if a link address message has been sent to this device yet. |
* |
* This structure represents an MST branch device, there is one |
* primary branch device at the root, along with any others connected |
* to downstream ports |
*/ |
struct drm_dp_mst_branch { |
struct kref kref; |
u8 rad[8]; |
u8 lct; |
int num_ports; |
int msg_slots; |
struct list_head ports; |
/* list of tx ops queue for this port */ |
struct drm_dp_mst_port *port_parent; |
struct drm_dp_mst_topology_mgr *mgr; |
/* slots are protected by mstb->mgr->qlock */ |
struct drm_dp_sideband_msg_tx *tx_slots[2]; |
int last_seqno; |
bool link_address_sent; |
}; |
/* sideband msg header - not bit struct */ |
struct drm_dp_sideband_msg_hdr { |
u8 lct; |
u8 lcr; |
u8 rad[8]; |
bool broadcast; |
bool path_msg; |
u8 msg_len; |
bool somt; |
bool eomt; |
bool seqno; |
}; |
struct drm_dp_nak_reply { |
u8 guid[16]; |
u8 reason; |
u8 nak_data; |
}; |
struct drm_dp_link_address_ack_reply { |
u8 guid[16]; |
u8 nports; |
struct drm_dp_link_addr_reply_port { |
bool input_port; |
u8 peer_device_type; |
u8 port_number; |
bool mcs; |
bool ddps; |
bool legacy_device_plug_status; |
u8 dpcd_revision; |
u8 peer_guid[16]; |
u8 num_sdp_streams; |
u8 num_sdp_stream_sinks; |
} ports[16]; |
}; |
struct drm_dp_remote_dpcd_read_ack_reply { |
u8 port_number; |
u8 num_bytes; |
u8 bytes[255]; |
}; |
struct drm_dp_remote_dpcd_write_ack_reply { |
u8 port_number; |
}; |
struct drm_dp_remote_dpcd_write_nak_reply { |
u8 port_number; |
u8 reason; |
u8 bytes_written_before_failure; |
}; |
struct drm_dp_remote_i2c_read_ack_reply { |
u8 port_number; |
u8 num_bytes; |
u8 bytes[255]; |
}; |
/* NAK reply to REMOTE_I2C_READ */
struct drm_dp_remote_i2c_read_nak_reply {
	u8 port_number;
	u8 nak_reason;		/* failure reason code */
	u8 i2c_nak_transaction;	/* NOTE(review): presumably index of the transaction that NAKed — confirm */
};
/* ACK reply to REMOTE_I2C_WRITE */
struct drm_dp_remote_i2c_write_ack_reply {
	u8 port_number;
};
/* receive state for one incoming sideband message, assembled chunk by chunk */
struct drm_dp_sideband_msg_rx {
	u8 chunk[48];		/* raw bytes of the chunk currently being received */
	u8 msg[256];		/* assembled message body */
	u8 curchunk_len;	/* length of the current chunk */
	u8 curchunk_idx; /* chunk we are parsing now */
	u8 curchunk_hdrlen;	/* header length of the current chunk */
	u8 curlen; /* total length of the msg */
	bool have_somt;		/* seen start-of-message-transaction */
	bool have_eomt;		/* seen end-of-message-transaction */
	struct drm_dp_sideband_msg_hdr initial_hdr;	/* header of the first chunk */
};
/* body of an ALLOCATE_PAYLOAD request */
struct drm_dp_allocate_payload {
	u8 port_number;
	u8 number_sdp_streams;	/* number of valid entries in sdp_stream_sink[] */
	u8 vcpi;		/* virtual channel payload id */
	u16 pbn;		/* payload bandwidth number to allocate */
	u8 sdp_stream_sink[8];
};
/* ACK reply to ALLOCATE_PAYLOAD */
struct drm_dp_allocate_payload_ack_reply {
	u8 port_number;
	u8 vcpi;		/* virtual channel payload id that was allocated */
	u16 allocated_pbn;	/* bandwidth actually allocated, in PBN */
};
/* body of a CONNECTION_STATUS_NOTIFY up request */
struct drm_dp_connection_status_notify {
	u8 guid[16];	/* GUID of the originating branch device */
	u8 port_number;	/* port whose status changed */
	bool legacy_device_plug_status;
	bool displayport_device_plug_status;
	bool message_capability_status;
	bool input_port;
	u8 peer_device_type;
};
/* body of a REMOTE_DPCD_READ request */
struct drm_dp_remote_dpcd_read {
	u8 port_number;
	u32 dpcd_address;	/* DPCD offset to read from */
	u8 num_bytes;		/* number of bytes to read */
};
/* body of a REMOTE_DPCD_WRITE request */
struct drm_dp_remote_dpcd_write {
	u8 port_number;
	u32 dpcd_address;	/* DPCD offset to write to */
	u8 num_bytes;		/* number of bytes pointed to by @bytes */
	u8 *bytes;		/* data to write — NOTE(review): ownership/lifetime not visible here */
};
/* body of a REMOTE_I2C_READ request: up to 4 write transactions followed by
 * one read from @read_i2c_device_id */
struct drm_dp_remote_i2c_read {
	u8 num_transactions;	/* number of valid entries in transactions[] */
	u8 port_number;
	struct {
		u8 i2c_dev_id;	/* i2c address for this transaction */
		u8 num_bytes;	/* number of bytes pointed to by @bytes */
		u8 *bytes;	/* NOTE(review): ownership/lifetime not visible here */
		u8 no_stop_bit;	/* suppress the i2c stop after this transaction */
		u8 i2c_transaction_delay;
	} transactions[4];
	u8 read_i2c_device_id;	/* i2c address for the final read */
	u8 num_bytes_read;	/* number of bytes to read back */
};
/* body of a REMOTE_I2C_WRITE request */
struct drm_dp_remote_i2c_write {
	u8 port_number;
	u8 write_i2c_device_id;	/* i2c address to write to */
	u8 num_bytes;		/* number of bytes pointed to by @bytes */
	u8 *bytes;		/* data to write — NOTE(review): ownership/lifetime not visible here */
};
/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
	u8 port_number;	/* the only request payload is the target port */
};
/* ACK reply to ENUM_PATH_RESOURCES */
struct drm_dp_enum_path_resources_ack_reply {
	u8 port_number;
	u16 full_payload_bw_number;	/* total bandwidth of the path, in PBN */
	u16 avail_payload_bw_number;	/* remaining bandwidth of the path, in PBN */
};
/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
	u8 port_number;	/* port the reply refers to */
};
/* body of a QUERY_PAYLOAD request */
struct drm_dp_query_payload {
	u8 port_number;
	u8 vcpi;	/* virtual channel payload id to query */
};
/* body of a RESOURCE_STATUS_NOTIFY up request */
struct drm_dp_resource_status_notify {
	u8 port_number;		/* port whose resources changed */
	u8 guid[16];		/* GUID of the originating device */
	u16 available_pbn;	/* bandwidth now available, in PBN */
};
struct drm_dp_query_payload_ack_reply { |
u8 port_number; |
u8 allocated_pbn; |
}; |
/* decoded sideband request: @req_type selects the active union member */
struct drm_dp_sideband_msg_req_body {
	u8 req_type;
	union ack_req {
		struct drm_dp_connection_status_notify conn_stat;
		struct drm_dp_port_number_req port_num;
		struct drm_dp_resource_status_notify resource_stat;
		struct drm_dp_query_payload query_payload;
		struct drm_dp_allocate_payload allocate_payload;
		struct drm_dp_remote_dpcd_read dpcd_read;
		struct drm_dp_remote_dpcd_write dpcd_write;
		struct drm_dp_remote_i2c_read i2c_read;
		struct drm_dp_remote_i2c_write i2c_write;
	} u;
};
/* decoded sideband reply: @reply_type distinguishes ACK from NAK and
 * @req_type selects the active union member */
struct drm_dp_sideband_msg_reply_body {
	u8 reply_type;
	u8 req_type;
	union ack_replies {
		struct drm_dp_nak_reply nak;
		struct drm_dp_link_address_ack_reply link_addr;
		struct drm_dp_port_number_rep port_number;
		struct drm_dp_enum_path_resources_ack_reply path_resources;
		struct drm_dp_allocate_payload_ack_reply allocate_payload;
		struct drm_dp_query_payload_ack_reply query_payload;
		struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
		struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
		struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;
		struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
		struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
		struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
	} u;
};
/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4
/* one in-flight sideband transmit: encoded message plus its decoded reply */
struct drm_dp_sideband_msg_tx {
	u8 msg[256];		/* encoded message body */
	u8 chunk[48];		/* scratch buffer for the chunk being sent */
	u8 cur_offset;		/* offset of the next byte to transmit */
	u8 cur_len;		/* total encoded length */
	struct drm_dp_mst_branch *dst;	/* destination branch device */
	struct list_head next;	/* entry on the manager's tx queue */
	int seqno;		/* sequence number used for this transmit */
	int state;		/* DRM_DP_SIDEBAND_TX_* state, see above */
	bool path_msg;		/* send as a path message */
	struct drm_dp_sideband_msg_reply_body reply;	/* decoded reply once received */
};
/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
/* driver callbacks for connector lifetime and hotplug notification */
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path);
	/* destroy a connector previously returned by @add_connector */
	void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_connector *connector);
	/* notify the driver that the MST topology changed */
	void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
};
/* payload table size is bounded by the bits in the payload_mask bitmap */
#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
/* payload table entry states */
#define DP_PAYLOAD_LOCAL 1
#define DP_PAYLOAD_REMOTE 2
#define DP_PAYLOAD_DELETE_LOCAL 3
/* one entry in the payload table tracked by the topology manager */
struct drm_dp_payload {
	int payload_state;	/* DP_PAYLOAD_* value, see above */
	int start_slot;		/* first time slot of this payload */
	int num_slots;		/* number of time slots occupied */
};
/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 * @dev: device pointer for adding i2c devices etc.
 * @cbs: callbacks for connector addition and destruction.
 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write in one go.
 * @aux: aux channel for the DP connector.
 * @max_payloads: maximum number of payloads the GPU can generate.
 * @conn_base_id: DRM connector ID this mgr is connected to.
 * @down_rep_recv: msg receiver state for down replies.
 * @up_req_recv: msg receiver state for up requests.
 * @lock: protects mst state, primary, guid, dpcd.
 * @mst_state: if this manager is enabled for an MST capable port.
 * @mst_primary: pointer to the primary branch device.
 * @guid_valid: GUID valid for the primary branch device.
 * @guid: GUID for primary port.
 * @dpcd: cache of DPCD for primary port.
 * @sink_count: sink count — NOTE(review): presumably cached from DPCD, confirm.
 * @pbn_div: PBN to slots divisor.
 * @total_slots: total number of time slots on the link.
 * @avail_slots: number of time slots still unallocated.
 * @total_pbn: total PBN available on the link.
 * @qlock: protects @tx_msg_downq, @tx_msg_upq, the in-progress flags, the
 *	mstb tx_slots and txmsg->state once they are queued.
 * @tx_msg_downq: down requests waiting to be transmitted.
 * @tx_msg_upq: up replies waiting to be transmitted.
 * @tx_down_in_progress: a down message is currently in a tx slot.
 * @tx_up_in_progress: an up message is currently in a tx slot.
 * @payload_lock: protects @proposed_vcpis, @payloads and @payload_mask.
 * @proposed_vcpis: per-payload proposed VCPI allocations.
 * @payloads: payload table, one entry per possible payload.
 * @payload_mask: bitmask of payload table entries in use.
 * @tx_waitq: wait queue for sideband transmit completion.
 * @work: work item — NOTE(review): scheduling/semantics live in
 *	drm_dp_mst_topology.c, not visible here.
 * @tx_work: transmit work item — NOTE(review): as above.
 *
 * This struct represents the toplevel displayport MST topology manager.
 * There should be one instance of this for every MST capable DP connector
 * on the GPU.
 */
struct drm_dp_mst_topology_mgr {
	struct device *dev;
	struct drm_dp_mst_topology_cbs *cbs;
	int max_dpcd_transaction_bytes;
	struct drm_dp_aux *aux; /* auxch for this topology mgr to use */
	int max_payloads;
	int conn_base_id;
	/* only ever accessed from the workqueue - which should be serialised */
	struct drm_dp_sideband_msg_rx down_rep_recv;
	struct drm_dp_sideband_msg_rx up_req_recv;
	/* pointer to info about the initial MST device */
	struct mutex lock; /* protects mst_state + primary + guid + dpcd */
	bool mst_state;
	struct drm_dp_mst_branch *mst_primary;
	/* primary MST device GUID */
	bool guid_valid;
	u8 guid[16];
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 sink_count;
	int pbn_div;
	int total_slots;
	int avail_slots;
	int total_pbn;
	/* messages to be transmitted */
	/* qlock protects the upq/downq and in_progress,
	   the mstb tx_slots and txmsg->state once they are queued */
	struct mutex qlock;
	struct list_head tx_msg_downq;
	struct list_head tx_msg_upq;
	bool tx_down_in_progress;
	bool tx_up_in_progress;
	/* payload info + lock for it */
	struct mutex payload_lock;
	struct drm_dp_vcpi **proposed_vcpis;
	struct drm_dp_payload *payloads;
	unsigned long payload_mask;
	wait_queue_head_t tx_waitq;
	struct work_struct work;
	struct work_struct tx_work;
};
/* Create/destroy the toplevel MST manager and toggle MST mode on a port. */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
/* Called from the driver's HPD IRQ handler with the ESI vector. */
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
/* Connector probing helpers for MST ports. */
enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
/* PBN computation and VCPI/slot bandwidth management. */
int drm_dp_calc_pbn_mode(int clock, int bpp);
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port);
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn);
/* Two-part payload table update; drm_dp_check_act_status polls for ACT. */
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
/* Dump the topology to a seq_file (typically debugfs). */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr);
/* System suspend/resume hooks. */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
#endif |
/drivers/include/drm/drm_edid.h |
---|
202,6 → 202,11 |
#define DRM_EDID_FEATURE_PM_SUSPEND (1 << 6) |
#define DRM_EDID_FEATURE_PM_STANDBY (1 << 7) |
#define DRM_EDID_HDMI_DC_48 (1 << 6) |
#define DRM_EDID_HDMI_DC_36 (1 << 5) |
#define DRM_EDID_HDMI_DC_30 (1 << 4) |
#define DRM_EDID_HDMI_DC_Y444 (1 << 3) |
struct edid { |
u8 header[8]; |
/* Vendor & product info */ |
/drivers/include/drm/drm_fb_helper.h |
---|
32,6 → 32,7 |
struct drm_fb_helper; |
#include <linux/kgdb.h> |
struct drm_fb_helper_crtc { |
struct drm_mode_set mode_set; |
54,7 → 55,7 |
* save the current lut when force-restoring the fbdev for e.g. |
* kdbg. |
* @fb_probe: Driver callback to allocate and initialize the fbdev info |
* structure. Futhermore it also needs to allocate the drm |
* structure. Furthermore it also needs to allocate the drm |
* framebuffer used to back the fbdev. |
* @initial_config: Setup an initial fbdev display configuration |
* |
85,8 → 86,9 |
int crtc_count; |
struct drm_fb_helper_crtc *crtc_info; |
int connector_count; |
int connector_info_alloc_count; |
struct drm_fb_helper_connector **connector_info; |
struct drm_fb_helper_funcs *funcs; |
const struct drm_fb_helper_funcs *funcs; |
struct fb_info *fbdev; |
u32 pseudo_palette[17]; |
struct list_head kernel_fb_list; |
96,6 → 98,8 |
bool delayed_hotplug; |
}; |
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, |
const struct drm_fb_helper_funcs *funcs); |
int drm_fb_helper_init(struct drm_device *dev, |
struct drm_fb_helper *helper, int crtc_count, |
int max_conn); |
107,7 → 111,7 |
int drm_fb_helper_check_var(struct fb_var_screeninfo *var, |
struct fb_info *info); |
bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper); |
bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper); |
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, |
uint32_t fb_width, uint32_t fb_height); |
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, |
120,5 → 124,14 |
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper); |
int drm_fb_helper_debug_enter(struct fb_info *info); |
int drm_fb_helper_debug_leave(struct fb_info *info); |
struct drm_display_mode * |
drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, |
int width, int height); |
struct drm_display_mode * |
drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn, |
int width, int height); |
int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_connector *connector); |
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper, |
struct drm_connector *connector); |
#endif |
/drivers/include/drm/drm_mm.h |
---|
47,8 → 47,17 |
enum drm_mm_search_flags { |
DRM_MM_SEARCH_DEFAULT = 0, |
DRM_MM_SEARCH_BEST = 1 << 0, |
DRM_MM_SEARCH_BELOW = 1 << 1, |
}; |
enum drm_mm_allocator_flags { |
DRM_MM_CREATE_DEFAULT = 0, |
DRM_MM_CREATE_TOP = 1 << 0, |
}; |
#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT |
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP |
struct drm_mm_node { |
struct list_head node_list; |
struct list_head hole_stack; |
85,11 → 94,31 |
unsigned long *start, unsigned long *end); |
}; |
/** |
* drm_mm_node_allocated - checks whether a node is allocated |
* @node: drm_mm_node to check |
* |
* Drivers should use these helpers for proper encapsulation of drm_mm |
* internals. |
* |
* Returns: |
* True if the @node is allocated. |
*/ |
static inline bool drm_mm_node_allocated(struct drm_mm_node *node) |
{ |
return node->allocated; |
} |
/** |
* drm_mm_initialized - checks whether an allocator is initialized |
* @mm: drm_mm to check |
* |
* Drivers should use these helpers for proper encapsulation of drm_mm |
* internals. |
* |
* Returns: |
* True if the @mm is initialized. |
*/ |
static inline bool drm_mm_initialized(struct drm_mm *mm) |
{ |
return mm->hole_stack.next; |
100,6 → 129,17 |
return hole_node->start + hole_node->size; |
} |
/** |
* drm_mm_hole_node_start - computes the start of the hole following @node |
* @hole_node: drm_mm_node which implicitly tracks the following hole |
* |
* This is useful for driver-specific debug dumpers. Otherwise drivers should not |
* inspect holes themselves. Drivers must check first whether a hole indeed |
* follows by looking at node->hole_follows. |
* |
* Returns: |
* Start of the subsequent hole. |
*/ |
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) |
{ |
BUG_ON(!hole_node->hole_follows); |
112,18 → 152,52 |
struct drm_mm_node, node_list)->start; |
} |
/** |
* drm_mm_hole_node_end - computes the end of the hole following @node |
* @hole_node: drm_mm_node which implicitly tracks the following hole |
* |
* This is useful for driver-specific debug dumpers. Otherwise drivers should not |
* inspect holes themselves. Drivers must check first whether a hole indeed |
* follows by looking at node->hole_follows. |
* |
* Returns: |
* End of the subsequent hole. |
*/ |
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) |
{ |
return __drm_mm_hole_node_end(hole_node); |
} |
/** |
* drm_mm_for_each_node - iterator to walk over all allocated nodes |
* @entry: drm_mm_node structure to assign to in each iteration step |
* @mm: drm_mm allocator to walk |
* |
* This iterator walks over all nodes in the range allocator. It is implemented |
* with list_for_each, so not safe against removal of elements. |
*/ |
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ |
&(mm)->head_node.node_list, \ |
node_list) |
/* Note that we need to unroll list_for_each_entry in order to inline |
* setting hole_start and hole_end on each iteration and keep the |
* macro sane. |
/** |
* drm_mm_for_each_hole - iterator to walk over all holes |
* @entry: drm_mm_node used internally to track progress |
* @mm: drm_mm allocator to walk |
* @hole_start: ulong variable to assign the hole start to on each iteration |
* @hole_end: ulong variable to assign the hole end to on each iteration |
* |
* This iterator walks over all holes in the range allocator. It is implemented |
* with list_for_each, so not safe against removal of elements. @entry is used |
* internally and will not reflect a real drm_mm_node for the very first hole. |
* Hence users of this iterator may not access it. |
* |
* Implementation Note: |
* We need to inline list_for_each_entry in order to be able to set hole_start |
* and hole_end on each iteration while keeping the macro sane. |
* |
* The __drm_mm_for_each_hole version is similar, but with added support for |
* going backwards. |
*/ |
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \ |
for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ |
133,17 → 207,42 |
1 : 0; \ |
entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack)) |
#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \ |
for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ |
&entry->hole_stack != &(mm)->hole_stack ? \ |
hole_start = drm_mm_hole_node_start(entry), \ |
hole_end = drm_mm_hole_node_end(entry), \ |
1 : 0; \ |
entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack)) |
/* |
* Basic range manager support (drm_mm.c) |
*/ |
extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); |
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); |
extern int drm_mm_insert_node_generic(struct drm_mm *mm, |
int drm_mm_insert_node_generic(struct drm_mm *mm, |
struct drm_mm_node *node, |
unsigned long size, |
unsigned alignment, |
unsigned long color, |
enum drm_mm_search_flags flags); |
enum drm_mm_search_flags sflags, |
enum drm_mm_allocator_flags aflags); |
/** |
* drm_mm_insert_node - search for space and insert @node |
* @mm: drm_mm to allocate from |
* @node: preallocate node to insert |
* @size: size of the allocation |
* @alignment: alignment of the allocation |
* @flags: flags to fine-tune the allocation |
* |
* This is a simplified version of drm_mm_insert_node_generic() with @color set |
* to 0. |
* |
* The preallocated node must be cleared to 0. |
* |
* Returns: |
* 0 on success, -ENOSPC if there's no suitable hole. |
*/ |
static inline int drm_mm_insert_node(struct drm_mm *mm, |
struct drm_mm_node *node, |
unsigned long size, |
150,10 → 249,11 |
unsigned alignment, |
enum drm_mm_search_flags flags) |
{ |
return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags); |
return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags, |
DRM_MM_CREATE_DEFAULT); |
} |
extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, |
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, |
struct drm_mm_node *node, |
unsigned long size, |
unsigned alignment, |
160,7 → 260,26 |
unsigned long color, |
unsigned long start, |
unsigned long end, |
enum drm_mm_search_flags flags); |
enum drm_mm_search_flags sflags, |
enum drm_mm_allocator_flags aflags); |
/** |
* drm_mm_insert_node_in_range - ranged search for space and insert @node |
* @mm: drm_mm to allocate from |
* @node: preallocate node to insert |
* @size: size of the allocation |
* @alignment: alignment of the allocation |
* @start: start of the allowed range for this node |
* @end: end of the allowed range for this node |
* @flags: flags to fine-tune the allocation |
* |
* This is a simplified version of drm_mm_insert_node_in_range_generic() with |
* @color set to 0. |
* |
* The preallocated node must be cleared to 0. |
* |
* Returns: |
* 0 on success, -ENOSPC if there's no suitable hole. |
*/ |
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, |
struct drm_mm_node *node, |
unsigned long size, |
170,16 → 289,17 |
enum drm_mm_search_flags flags) |
{ |
return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, |
0, start, end, flags); |
0, start, end, flags, |
DRM_MM_CREATE_DEFAULT); |
} |
extern void drm_mm_remove_node(struct drm_mm_node *node); |
extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); |
extern void drm_mm_init(struct drm_mm *mm, |
void drm_mm_remove_node(struct drm_mm_node *node); |
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); |
void drm_mm_init(struct drm_mm *mm, |
unsigned long start, |
unsigned long size); |
extern void drm_mm_takedown(struct drm_mm *mm); |
extern int drm_mm_clean(struct drm_mm *mm); |
void drm_mm_takedown(struct drm_mm *mm); |
bool drm_mm_clean(struct drm_mm *mm); |
void drm_mm_init_scan(struct drm_mm *mm, |
unsigned long size, |
191,10 → 311,10 |
unsigned long color, |
unsigned long start, |
unsigned long end); |
int drm_mm_scan_add_block(struct drm_mm_node *node); |
int drm_mm_scan_remove_block(struct drm_mm_node *node); |
bool drm_mm_scan_add_block(struct drm_mm_node *node); |
bool drm_mm_scan_remove_block(struct drm_mm_node *node); |
extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix); |
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix); |
#ifdef CONFIG_DEBUG_FS |
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm); |
#endif |
/drivers/include/drm/drm_modes.h |
---|
0,0 → 1,237 |
/* |
* Copyright © 2006 Keith Packard |
* Copyright © 2007-2008 Dave Airlie |
* Copyright © 2007-2008 Intel Corporation |
* Jesse Barnes <jesse.barnes@intel.com> |
* Copyright © 2014 Intel Corporation |
* Daniel Vetter <daniel.vetter@ffwll.ch> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef __DRM_MODES_H__ |
#define __DRM_MODES_H__ |
/* |
* Note on terminology: here, for brevity and convenience, we refer to connector |
* control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS, |
* DVI, etc. And 'screen' refers to the whole of the visible display, which |
* may span multiple monitors (and therefore multiple CRTC and connector |
* structures). |
*/ |
enum drm_mode_status { |
MODE_OK = 0, /* Mode OK */ |
MODE_HSYNC, /* hsync out of range */ |
MODE_VSYNC, /* vsync out of range */ |
MODE_H_ILLEGAL, /* mode has illegal horizontal timings */ |
MODE_V_ILLEGAL, /* mode has illegal horizontal timings */ |
MODE_BAD_WIDTH, /* requires an unsupported linepitch */ |
MODE_NOMODE, /* no mode with a matching name */ |
MODE_NO_INTERLACE, /* interlaced mode not supported */ |
MODE_NO_DBLESCAN, /* doublescan mode not supported */ |
MODE_NO_VSCAN, /* multiscan mode not supported */ |
MODE_MEM, /* insufficient video memory */ |
MODE_VIRTUAL_X, /* mode width too large for specified virtual size */ |
MODE_VIRTUAL_Y, /* mode height too large for specified virtual size */ |
MODE_MEM_VIRT, /* insufficient video memory given virtual size */ |
MODE_NOCLOCK, /* no fixed clock available */ |
MODE_CLOCK_HIGH, /* clock required is too high */ |
MODE_CLOCK_LOW, /* clock required is too low */ |
MODE_CLOCK_RANGE, /* clock/mode isn't in a ClockRange */ |
MODE_BAD_HVALUE, /* horizontal timing was out of range */ |
MODE_BAD_VVALUE, /* vertical timing was out of range */ |
MODE_BAD_VSCAN, /* VScan value out of range */ |
MODE_HSYNC_NARROW, /* horizontal sync too narrow */ |
MODE_HSYNC_WIDE, /* horizontal sync too wide */ |
MODE_HBLANK_NARROW, /* horizontal blanking too narrow */ |
MODE_HBLANK_WIDE, /* horizontal blanking too wide */ |
MODE_VSYNC_NARROW, /* vertical sync too narrow */ |
MODE_VSYNC_WIDE, /* vertical sync too wide */ |
MODE_VBLANK_NARROW, /* vertical blanking too narrow */ |
MODE_VBLANK_WIDE, /* vertical blanking too wide */ |
MODE_PANEL, /* exceeds panel dimensions */ |
MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */ |
MODE_ONE_WIDTH, /* only one width is supported */ |
MODE_ONE_HEIGHT, /* only one height is supported */ |
MODE_ONE_SIZE, /* only one resolution is supported */ |
MODE_NO_REDUCED, /* monitor doesn't accept reduced blanking */ |
MODE_NO_STEREO, /* stereo modes not supported */ |
MODE_UNVERIFIED = -3, /* mode needs to be reverified */ |
MODE_BAD = -2, /* unspecified reason */ |
MODE_ERROR = -1 /* error condition */ |
}; |
#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \ |
DRM_MODE_TYPE_CRTC_C) |
#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ |
.name = nm, .status = 0, .type = (t), .clock = (c), \ |
.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ |
.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ |
.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ |
.vscan = (vs), .flags = (f), \ |
.base.type = DRM_MODE_OBJECT_MODE |
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */ |
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */ |
#define DRM_MODE_FLAG_3D_MAX DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF |
struct drm_display_mode { |
/* Header */ |
struct list_head head; |
struct drm_mode_object base; |
char name[DRM_DISPLAY_MODE_LEN]; |
enum drm_mode_status status; |
unsigned int type; |
/* Proposed mode values */ |
int clock; /* in kHz */ |
int hdisplay; |
int hsync_start; |
int hsync_end; |
int htotal; |
int hskew; |
int vdisplay; |
int vsync_start; |
int vsync_end; |
int vtotal; |
int vscan; |
unsigned int flags; |
/* Addressable image size (may be 0 for projectors, etc.) */ |
int width_mm; |
int height_mm; |
/* Actual mode we give to hw */ |
int crtc_clock; /* in KHz */ |
int crtc_hdisplay; |
int crtc_hblank_start; |
int crtc_hblank_end; |
int crtc_hsync_start; |
int crtc_hsync_end; |
int crtc_htotal; |
int crtc_hskew; |
int crtc_vdisplay; |
int crtc_vblank_start; |
int crtc_vblank_end; |
int crtc_vsync_start; |
int crtc_vsync_end; |
int crtc_vtotal; |
/* Driver private mode info */ |
int *private; |
int private_flags; |
int vrefresh; /* in Hz */ |
int hsync; /* in kHz */ |
enum hdmi_picture_aspect picture_aspect_ratio; |
}; |
/* mode specified on the command line */ |
struct drm_cmdline_mode { |
bool specified; |
bool refresh_specified; |
bool bpp_specified; |
int xres, yres; |
int bpp; |
int refresh; |
bool rb; |
bool interlace; |
bool cvt; |
bool margins; |
enum drm_connector_force force; |
}; |
/** |
* drm_mode_is_stereo - check for stereo mode flags |
* @mode: drm_display_mode to check |
* |
* Returns: |
* True if the mode is one of the stereo modes (like side-by-side), false if |
* not. |
*/ |
static inline bool drm_mode_is_stereo(const struct drm_display_mode *mode) |
{ |
return mode->flags & DRM_MODE_FLAG_3D_MASK; |
} |
struct drm_connector; |
struct drm_cmdline_mode; |
struct drm_display_mode *drm_mode_create(struct drm_device *dev); |
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); |
void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); |
void drm_mode_debug_printmodeline(const struct drm_display_mode *mode); |
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, |
int hdisplay, int vdisplay, int vrefresh, |
bool reduced, bool interlaced, |
bool margins); |
struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, |
int hdisplay, int vdisplay, int vrefresh, |
bool interlaced, int margins); |
struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev, |
int hdisplay, int vdisplay, |
int vrefresh, bool interlaced, |
int margins, |
int GTF_M, int GTF_2C, |
int GTF_K, int GTF_2J); |
void drm_display_mode_from_videomode(const struct videomode *vm, |
struct drm_display_mode *dmode); |
int of_get_drm_display_mode(struct device_node *np, |
struct drm_display_mode *dmode, |
int index); |
void drm_mode_set_name(struct drm_display_mode *mode); |
int drm_mode_hsync(const struct drm_display_mode *mode); |
int drm_mode_vrefresh(const struct drm_display_mode *mode); |
void drm_mode_set_crtcinfo(struct drm_display_mode *p, |
int adjust_flags); |
void drm_mode_copy(struct drm_display_mode *dst, |
const struct drm_display_mode *src); |
struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, |
const struct drm_display_mode *mode); |
bool drm_mode_equal(const struct drm_display_mode *mode1, |
const struct drm_display_mode *mode2); |
bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1, |
const struct drm_display_mode *mode2); |
/* for use by the crtc helper probe functions */ |
void drm_mode_validate_size(struct drm_device *dev, |
struct list_head *mode_list, |
int maxX, int maxY); |
void drm_mode_prune_invalid(struct drm_device *dev, |
struct list_head *mode_list, bool verbose); |
void drm_mode_sort(struct list_head *mode_list); |
void drm_mode_connector_list_update(struct drm_connector *connector, bool merge_type_bits); |
/* parsing cmdline modes */ |
bool |
drm_mode_parse_command_line_for_connector(const char *mode_option, |
struct drm_connector *connector, |
struct drm_cmdline_mode *mode); |
struct drm_display_mode * |
drm_mode_create_from_cmdline_mode(struct drm_device *dev, |
struct drm_cmdline_mode *cmd); |
#endif /* __DRM_MODES_H__ */ |
/drivers/include/drm/drm_modeset_lock.h |
---|
0,0 → 1,126 |
/* |
* Copyright (C) 2014 Red Hat |
* Author: Rob Clark <robdclark@gmail.com> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef DRM_MODESET_LOCK_H_ |
#define DRM_MODESET_LOCK_H_ |
#include <linux/ww_mutex.h> |
struct drm_modeset_lock; |
/** |
* drm_modeset_acquire_ctx - locking context (see ww_acquire_ctx) |
* @ww_ctx: base acquire ctx |
* @contended: used internally for -EDEADLK handling |
* @locked: list of held locks |
* |
* Each thread competing for a set of locks must use one acquire |
* ctx. And if any lock fxn returns -EDEADLK, it must backoff and |
* retry. |
*/ |
struct drm_modeset_acquire_ctx { |
struct ww_acquire_ctx ww_ctx; |
/** |
* Contended lock: if a lock is contended you should only call |
* drm_modeset_backoff() which drops locks and slow-locks the |
* contended lock. |
*/ |
struct drm_modeset_lock *contended; |
/** |
* list of held locks (drm_modeset_lock) |
*/ |
struct list_head locked; |
}; |
/** |
* drm_modeset_lock - used for locking modeset resources. |
* @mutex: resource locking |
* @head: used to hold its place on state->locked list when |
* part of an atomic update |
* |
* Used for locking CRTCs and other modeset resources. |
*/ |
struct drm_modeset_lock { |
/** |
* modeset lock |
*/ |
struct ww_mutex mutex; |
/** |
* Resources that are locked as part of an atomic update are added |
* to a list (so we know what to unlock at the end). |
*/ |
struct list_head head; |
}; |
extern struct ww_class crtc_ww_class; |
void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx, |
uint32_t flags); |
void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx); |
void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx); |
void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx); |
int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx); |
/** |
* drm_modeset_lock_init - initialize lock |
* @lock: lock to init |
*/ |
static inline void drm_modeset_lock_init(struct drm_modeset_lock *lock) |
{ |
ww_mutex_init(&lock->mutex, &crtc_ww_class); |
INIT_LIST_HEAD(&lock->head); |
} |
/**
 * drm_modeset_lock_fini - cleanup lock
 * @lock: lock to cleanup
 *
 * Warns if @lock is still on an acquire context's held-locks list,
 * i.e. it is being torn down while still part of an atomic update.
 */
static inline void drm_modeset_lock_fini(struct drm_modeset_lock *lock)
{
	/* must not still be held by any acquire context */
	WARN_ON(!list_empty(&lock->head));
}
/**
 * drm_modeset_is_locked - equivalent to mutex_is_locked()
 * @lock: lock to check
 *
 * Returns true if the underlying ww_mutex is currently held — by any
 * context, not necessarily the caller's. Intended for assertions, not
 * for lock-free decision making.
 */
static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock)
{
	return ww_mutex_is_locked(&lock->mutex);
}
int drm_modeset_lock(struct drm_modeset_lock *lock, |
struct drm_modeset_acquire_ctx *ctx); |
int drm_modeset_lock_interruptible(struct drm_modeset_lock *lock, |
struct drm_modeset_acquire_ctx *ctx); |
void drm_modeset_unlock(struct drm_modeset_lock *lock); |
struct drm_device; |
int drm_modeset_lock_all_crtcs(struct drm_device *dev, |
struct drm_modeset_acquire_ctx *ctx); |
#endif /* DRM_MODESET_LOCK_H_ */ |
/drivers/include/drm/drm_pciids.h |
---|
637,6 → 637,22 |
{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
{0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
/drivers/include/drm/drm_plane_helper.h |
---|
0,0 → 1,71 |
/* |
* Copyright (C) 2011-2013 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
* SOFTWARE. |
*/ |
#ifndef DRM_PLANE_HELPER_H |
#define DRM_PLANE_HELPER_H |
#include <drm/drm_rect.h> |
/* |
* Drivers that don't allow primary plane scaling may pass this macro in place |
* of the min/max scale parameters of the update checker function. |
* |
* Due to src being in 16.16 fixed point and dest being in integer pixels, |
* 1<<16 represents no scaling. |
*/ |
#define DRM_PLANE_HELPER_NO_SCALING (1<<16) |
/** |
* DOC: plane helpers |
* |
* Helper functions to assist with creation and handling of CRTC primary |
* planes. |
*/ |
extern int drm_plane_helper_check_update(struct drm_plane *plane, |
struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
struct drm_rect *src, |
struct drm_rect *dest, |
const struct drm_rect *clip, |
int min_scale, |
int max_scale, |
bool can_position, |
bool can_update_disabled, |
bool *visible); |
extern int drm_primary_helper_update(struct drm_plane *plane, |
struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
int crtc_x, int crtc_y, |
unsigned int crtc_w, unsigned int crtc_h, |
uint32_t src_x, uint32_t src_y, |
uint32_t src_w, uint32_t src_h); |
extern int drm_primary_helper_disable(struct drm_plane *plane); |
extern void drm_primary_helper_destroy(struct drm_plane *plane); |
extern const struct drm_plane_funcs drm_primary_helper_funcs; |
extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev, |
const uint32_t *formats, |
int num_formats); |
#endif |
/drivers/include/drm/drm_rect.h |
---|
163,5 → 163,11 |
struct drm_rect *dst, |
int min_vscale, int max_vscale); |
void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point); |
void drm_rect_rotate(struct drm_rect *r, |
int width, int height, |
unsigned int rotation); |
void drm_rect_rotate_inv(struct drm_rect *r, |
int width, int height, |
unsigned int rotation); |
#endif |
/drivers/include/drm/drm_vma_manager.h |
---|
221,8 → 221,8 |
* @file_mapping: Address space to unmap @node from |
* |
* Unmap all userspace mappings for a given offset node. The mappings must be |
* associated with the @file_mapping address-space. If no offset exists or |
* the address-space is invalid, nothing is done. |
* associated with the @file_mapping address-space. If no offset exists |
* nothing is done. |
* |
* This call is unlocked. The caller must guarantee that drm_vma_offset_remove() |
* is not called on this node concurrently. |
230,10 → 230,10 |
static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node, |
struct address_space *file_mapping) |
{ |
if (file_mapping && drm_vma_node_has_offset(node)) |
unmap_mapping_range(file_mapping, |
drm_vma_node_offset_addr(node), |
drm_vma_node_size(node) << PAGE_SHIFT, 1); |
// if (drm_vma_node_has_offset(node)) |
// unmap_mapping_range(file_mapping, |
// drm_vma_node_offset_addr(node), |
// drm_vma_node_size(node) << PAGE_SHIFT, 1); |
} |
/** |
/drivers/include/drm/i915_drm.h |
---|
56,6 → 56,12 |
#define I830_GMCH_CTRL 0x52 |
#define I830_GMCH_GMS_MASK 0x70 |
#define I830_GMCH_GMS_LOCAL 0x10 |
#define I830_GMCH_GMS_STOLEN_512 0x20 |
#define I830_GMCH_GMS_STOLEN_1024 0x30 |
#define I830_GMCH_GMS_STOLEN_8192 0x40 |
#define I855_GMCH_GMS_MASK 0xF0 |
#define I855_GMCH_GMS_STOLEN_0M 0x0 |
#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) |
72,4 → 78,18 |
#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) |
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) |
#define I830_DRB3 0x63 |
#define I85X_DRB3 0x43 |
#define I865_TOUD 0xc4 |
#define I830_ESMRAMC 0x91 |
#define I845_ESMRAMC 0x9e |
#define I85X_ESMRAMC 0x61 |
#define TSEG_ENABLE (1 << 0) |
#define I830_TSEG_SIZE_512K (0 << 1) |
#define I830_TSEG_SIZE_1M (1 << 1) |
#define I845_TSEG_SIZE_MASK (3 << 1) |
#define I845_TSEG_SIZE_512K (2 << 1) |
#define I845_TSEG_SIZE_1M (3 << 1) |
#endif /* _I915_DRM_H_ */ |
/drivers/include/drm/i915_pciids.h |
---|
191,8 → 191,8 |
INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \ |
INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \ |
INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \ |
INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \ |
INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \ |
INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \ |
INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \ |
INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \ |
INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \ |
INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \ |
223,14 → 223,40 |
_INTEL_BDW_D(gt, 0x160A, info), /* Server */ \ |
_INTEL_BDW_D(gt, 0x160D, info) /* Workstation */ |
#define INTEL_BDW_M_IDS(info) \ |
#define INTEL_BDW_GT12M_IDS(info) \ |
_INTEL_BDW_M_IDS(1, info), \ |
_INTEL_BDW_M_IDS(2, info), \ |
_INTEL_BDW_M_IDS(2, info) |
#define INTEL_BDW_GT12D_IDS(info) \ |
_INTEL_BDW_D_IDS(1, info), \ |
_INTEL_BDW_D_IDS(2, info) |
#define INTEL_BDW_GT3M_IDS(info) \ |
_INTEL_BDW_M_IDS(3, info) |
#define INTEL_BDW_D_IDS(info) \ |
_INTEL_BDW_D_IDS(1, info), \ |
_INTEL_BDW_D_IDS(2, info), \ |
#define INTEL_BDW_GT3D_IDS(info) \ |
_INTEL_BDW_D_IDS(3, info) |
#define INTEL_BDW_RSVDM_IDS(info) \ |
_INTEL_BDW_M_IDS(4, info) |
#define INTEL_BDW_RSVDD_IDS(info) \ |
_INTEL_BDW_D_IDS(4, info) |
#define INTEL_BDW_M_IDS(info) \ |
INTEL_BDW_GT12M_IDS(info), \ |
INTEL_BDW_GT3M_IDS(info), \ |
INTEL_BDW_RSVDM_IDS(info) |
#define INTEL_BDW_D_IDS(info) \ |
INTEL_BDW_GT12D_IDS(info), \ |
INTEL_BDW_GT3D_IDS(info), \ |
INTEL_BDW_RSVDD_IDS(info) |
#define INTEL_CHV_IDS(info) \ |
INTEL_VGA_DEVICE(0x22b0, info), \ |
INTEL_VGA_DEVICE(0x22b1, info), \ |
INTEL_VGA_DEVICE(0x22b2, info), \ |
INTEL_VGA_DEVICE(0x22b3, info) |
#endif /* _I915_PCIIDS_H */ |
/drivers/include/drm/i915_powerwell.h |
---|
30,7 → 30,8 |
#define _I915_POWERWELL_H_ |
/* For use by hda_i915 driver */ |
extern void i915_request_power_well(void); |
extern void i915_release_power_well(void); |
extern int i915_request_power_well(void); |
extern int i915_release_power_well(void); |
extern int i915_get_cdclk_freq(void); |
#endif /* _I915_POWERWELL_H_ */ |
/drivers/include/drm/ttm/ttm_bo_api.h |
---|
39,16 → 39,13 |
#include <linux/mutex.h> |
#include <linux/mm.h> |
#include <linux/bitmap.h> |
#include <linux/reservation.h> |
struct ttm_bo_device; |
struct drm_mm_node; |
struct reservation_object { |
struct mutex lock; |
}; |
/** |
* struct ttm_placement |
* |
488,13 → 485,12 |
void (*destroy) (struct ttm_buffer_object *)); |
/** |
* ttm_bo_synccpu_object_init |
* ttm_bo_create |
* |
* @bdev: Pointer to a ttm_bo_device struct. |
* @bo: Pointer to a ttm_buffer_object to be initialized. |
* @size: Requested size of buffer object. |
* @type: Requested type of buffer object. |
* @flags: Initial placement flags. |
* @placement: Initial placement. |
* @page_alignment: Data alignment in pages. |
* @interruptible: If needing to sleep while waiting for GPU resources, |
* sleep interruptible. |
/drivers/include/drm/ttm/ttm_bo_driver.h |
---|
37,12 → 37,11 |
#include <drm/drm_mm.h> |
#include <drm/drm_global.h> |
#include <drm/drm_vma_manager.h> |
//#include <linux/workqueue.h> |
#include <linux/workqueue.h> |
#include <linux/fs.h> |
#include <linux/spinlock.h> |
#include <linux/reservation.h> |
struct ww_acquire_ctx; |
struct ttm_backend_func { |
/** |
* struct ttm_backend_func member bind |
134,6 → 133,7 |
* struct ttm_dma_tt |
* |
* @ttm: Base ttm_tt struct. |
* @cpu_address: The CPU address of the pages |
* @dma_address: The DMA (bus) addresses of the pages |
* @pages_list: used by some page allocation backend |
* |
143,6 → 143,7 |
*/ |
struct ttm_dma_tt { |
struct ttm_tt ttm; |
void **cpu_address; |
dma_addr_t *dma_address; |
struct list_head pages_list; |
}; |
183,6 → 184,7 |
* @man: Pointer to a memory type manager. |
* @bo: Pointer to the buffer object we're allocating space for. |
* @placement: Placement details. |
* @flags: Additional placement flags. |
* @mem: Pointer to a struct ttm_mem_reg to be filled in. |
* |
* This function should allocate space in the memory type managed |
207,6 → 209,7 |
int (*get_node)(struct ttm_mem_type_manager *man, |
struct ttm_buffer_object *bo, |
struct ttm_placement *placement, |
uint32_t flags, |
struct ttm_mem_reg *mem); |
/** |
654,18 → 657,6 |
extern int ttm_tt_swapin(struct ttm_tt *ttm); |
/** |
* ttm_tt_cache_flush: |
* |
* @pages: An array of pointers to struct page:s to flush. |
* @num_pages: Number of pages to flush. |
* |
* Flush the data of the indicated pages from the cpu caches. |
* This is used when changing caching attributes of the pages from |
* cache-coherent. |
*/ |
extern void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages); |
/** |
* ttm_tt_set_placement_caching: |
* |
* @ttm A struct ttm_tt the backing pages of which will change caching policy. |
748,6 → 739,7 |
* @bdev: A pointer to a struct ttm_bo_device to initialize. |
* @glob: A pointer to an initialized struct ttm_bo_global. |
* @driver: A pointer to a struct ttm_bo_driver set up by the caller. |
* @mapping: The address space to use for this bo. |
* @file_page_offset: Offset into the device address space that is available |
* for buffer data. This ensures compatibility with other users of the |
* address space. |
759,6 → 751,7 |
extern int ttm_bo_device_init(struct ttm_bo_device *bdev, |
struct ttm_bo_global *glob, |
struct ttm_bo_driver *driver, |
struct address_space *mapping, |
uint64_t file_page_offset, bool need_dma32); |
/** |
787,7 → 780,7 |
extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo); |
/** |
* ttm_bo_reserve_nolru: |
* __ttm_bo_reserve: |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* @interruptible: Sleep interruptible if waiting. |
808,13 → 801,13 |
* -EALREADY: Bo already reserved using @ticket. This error code will only |
* be returned if @use_ticket is set to true. |
*/ |
static inline int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo, |
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo, |
bool interruptible, |
bool no_wait, bool use_ticket, |
struct ww_acquire_ctx *ticket) |
{ |
int ret = 0; |
/* |
if (no_wait) { |
bool success; |
if (WARN_ON(ticket)) |
830,7 → 823,6 |
ret = ww_mutex_lock(&bo->resv->lock, ticket); |
if (ret == -EINTR) |
return -ERESTARTSYS; |
*/ |
return ret; |
} |
888,8 → 880,7 |
WARN_ON(!atomic_read(&bo->kref.refcount)); |
ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_ticket, |
ticket); |
ret = __ttm_bo_reserve(bo, interruptible, no_wait, use_ticket, ticket); |
if (likely(ret == 0)) |
ttm_bo_del_sub_from_lru(bo); |
914,10 → 905,6 |
WARN_ON(!atomic_read(&bo->kref.refcount)); |
if (interruptible) |
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock, |
ticket); |
else |
ww_mutex_lock_slow(&bo->resv->lock, ticket); |
if (likely(ret == 0)) |
929,33 → 916,45 |
} |
/** |
* ttm_bo_unreserve_ticket |
* __ttm_bo_unreserve |
* @bo: A pointer to a struct ttm_buffer_object. |
* @ticket: ww_acquire_ctx used for reserving |
* |
* Unreserve a previous reservation of @bo made with @ticket. |
* Unreserve a previous reservation of @bo where the buffer object is |
* already on lru lists. |
*/ |
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, |
struct ww_acquire_ctx *t) |
static inline void __ttm_bo_unreserve(struct ttm_buffer_object *bo) |
{ |
ww_mutex_unlock(&bo->resv->lock); |
} |
/** |
* ttm_bo_unreserve |
* |
* @bo: A pointer to a struct ttm_buffer_object. |
* |
* Unreserve a previous reservation of @bo. |
*/ |
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
		/* put the BO back on the LRU so it becomes evictable again */
		spin_lock(&bo->glob->lru_lock);
		ttm_bo_add_to_lru(bo);
		spin_unlock(&bo->glob->lru_lock);
	}
	/* NOTE(review): dead commented-out unlock below duplicates what
	 * __ttm_bo_unreserve() does — consider deleting it. */
	// ww_mutex_unlock(&bo->resv->lock);
	__ttm_bo_unreserve(bo);
}
/** |
* ttm_bo_unreserve |
* |
* ttm_bo_unreserve_ticket |
* @bo: A pointer to a struct ttm_buffer_object. |
* @ticket: ww_acquire_ctx used for reserving |
* |
* Unreserve a previous reservation of @bo. |
* Unreserve a previous reservation of @bo made with @ticket. |
*/ |
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo) |
static inline void ttm_bo_unreserve_ticket(struct ttm_buffer_object *bo, |
struct ww_acquire_ctx *t) |
{ |
ttm_bo_unreserve_ticket(bo, NULL); |
ttm_bo_unreserve(bo); |
} |
/* |
/drivers/include/drm/ttm/ttm_lock.h |
---|
51,7 → 51,7 |
#include <ttm/ttm_object.h> |
#include <linux/wait.h> |
#include <linux/atomic.h> |
//#include <linux/atomic.h> |
/** |
* struct ttm_lock |
/drivers/include/drm/ttm/ttm_object.h |
---|
244,6 → 244,10 |
extern int ttm_ref_object_add(struct ttm_object_file *tfile, |
struct ttm_base_object *base, |
enum ttm_ref_type ref_type, bool *existed); |
extern bool ttm_ref_object_exists(struct ttm_object_file *tfile, |
struct ttm_base_object *base); |
/** |
* ttm_ref_object_base_unref |
* |
/drivers/include/drm/ttm/ttm_page_alloc.h |
---|
29,6 → 29,8 |
#include <drm/ttm/ttm_bo_driver.h> |
#include <drm/ttm/ttm_memory.h> |
struct device; |
/** |
* Initialize pool allocator. |
*/ |
/drivers/include/drm/ttm/ttm_placement.h |
---|
65,6 → 65,8 |
* reference the buffer. |
* TTM_PL_FLAG_NO_EVICT means that the buffer may never |
* be evicted to make room for other buffers. |
* TTM_PL_FLAG_TOPDOWN requests to be placed from the |
* top of the memory area, instead of the bottom. |
*/ |
#define TTM_PL_FLAG_CACHED (1 << 16) |
72,6 → 74,7 |
#define TTM_PL_FLAG_WC (1 << 18) |
#define TTM_PL_FLAG_SHARED (1 << 20) |
#define TTM_PL_FLAG_NO_EVICT (1 << 21) |
#define TTM_PL_FLAG_TOPDOWN (1 << 22) |
#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \ |
TTM_PL_FLAG_UNCACHED | \ |
/drivers/include/linux/backlight.h |
---|
7,4 → 7,36 |
#ifndef _LINUX_BACKLIGHT_H |
#define _LINUX_BACKLIGHT_H |
/* Notes on locking: |
* |
* backlight_device->ops_lock is an internal backlight lock protecting the |
* ops pointer and no code outside the core should need to touch it. |
* |
* Access to update_status() is serialised by the update_lock mutex since |
* most drivers seem to need this and historically get it wrong. |
* |
* Most drivers don't need locking on their get_brightness() method. |
* If yours does, you need to implement it in the driver. You can use the |
* update_lock mutex if appropriate. |
* |
* Any other use of the locks below is probably wrong. |
*/ |
enum backlight_update_reason { |
BACKLIGHT_UPDATE_HOTKEY, |
BACKLIGHT_UPDATE_SYSFS, |
}; |
enum backlight_type { |
BACKLIGHT_RAW = 1, |
BACKLIGHT_PLATFORM, |
BACKLIGHT_FIRMWARE, |
BACKLIGHT_TYPE_MAX, |
}; |
enum backlight_notification { |
BACKLIGHT_REGISTERED, |
BACKLIGHT_UNREGISTERED, |
}; |
#endif |
/drivers/include/linux/bitmap.h |
---|
88,32 → 88,32 |
* lib/bitmap.c provides these functions: |
*/ |
extern int __bitmap_empty(const unsigned long *bitmap, int bits); |
extern int __bitmap_full(const unsigned long *bitmap, int bits); |
extern int __bitmap_empty(const unsigned long *bitmap, unsigned int nbits); |
extern int __bitmap_full(const unsigned long *bitmap, unsigned int nbits); |
extern int __bitmap_equal(const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
const unsigned long *bitmap2, unsigned int nbits); |
extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, |
int bits); |
unsigned int nbits); |
extern void __bitmap_shift_right(unsigned long *dst, |
const unsigned long *src, int shift, int bits); |
extern void __bitmap_shift_left(unsigned long *dst, |
const unsigned long *src, int shift, int bits); |
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
const unsigned long *bitmap2, unsigned int nbits); |
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
const unsigned long *bitmap2, unsigned int nbits); |
extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
const unsigned long *bitmap2, unsigned int nbits); |
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
const unsigned long *bitmap2, unsigned int nbits); |
extern int __bitmap_intersects(const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
const unsigned long *bitmap2, unsigned int nbits); |
extern int __bitmap_subset(const unsigned long *bitmap1, |
const unsigned long *bitmap2, int bits); |
extern int __bitmap_weight(const unsigned long *bitmap, int bits); |
const unsigned long *bitmap2, unsigned int nbits); |
extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits); |
extern void bitmap_set(unsigned long *map, int i, int len); |
extern void bitmap_clear(unsigned long *map, int start, int nr); |
extern void bitmap_set(unsigned long *map, unsigned int start, int len); |
extern void bitmap_clear(unsigned long *map, unsigned int start, int len); |
extern unsigned long bitmap_find_next_zero_area(unsigned long *map, |
unsigned long size, |
unsigned long start, |
140,9 → 140,9 |
const unsigned long *relmap, int bits); |
extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, |
int sz, int bits); |
extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); |
extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); |
extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); |
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); |
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); |
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); |
extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); |
extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); |
188,15 → 188,15 |
} |
static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, |
const unsigned long *src2, int nbits) |
const unsigned long *src2, unsigned int nbits) |
{ |
if (small_const_nbits(nbits)) |
return (*dst = *src1 & *src2) != 0; |
return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0; |
return __bitmap_and(dst, src1, src2, nbits); |
} |
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, |
const unsigned long *src2, int nbits) |
const unsigned long *src2, unsigned int nbits) |
{ |
if (small_const_nbits(nbits)) |
*dst = *src1 | *src2; |
205,7 → 205,7 |
} |
static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1, |
const unsigned long *src2, int nbits) |
const unsigned long *src2, unsigned int nbits) |
{ |
if (small_const_nbits(nbits)) |
*dst = *src1 ^ *src2; |
214,24 → 214,24 |
} |
static inline int bitmap_andnot(unsigned long *dst, const unsigned long *src1, |
const unsigned long *src2, int nbits) |
const unsigned long *src2, unsigned int nbits) |
{ |
if (small_const_nbits(nbits)) |
return (*dst = *src1 & ~(*src2)) != 0; |
return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; |
return __bitmap_andnot(dst, src1, src2, nbits); |
} |
static inline void bitmap_complement(unsigned long *dst, const unsigned long *src, |
int nbits) |
unsigned int nbits) |
{ |
if (small_const_nbits(nbits)) |
*dst = ~(*src) & BITMAP_LAST_WORD_MASK(nbits); |
*dst = ~(*src); |
else |
__bitmap_complement(dst, src, nbits); |
} |
static inline int bitmap_equal(const unsigned long *src1, |
const unsigned long *src2, int nbits) |
const unsigned long *src2, unsigned int nbits) |
{ |
if (small_const_nbits(nbits)) |
return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits)); |
240,7 → 240,7 |
} |
static inline int bitmap_intersects(const unsigned long *src1, |
const unsigned long *src2, int nbits) |
const unsigned long *src2, unsigned int nbits) |
{ |
if (small_const_nbits(nbits)) |
return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0; |
249,7 → 249,7 |
} |
static inline int bitmap_subset(const unsigned long *src1, |
const unsigned long *src2, int nbits) |
const unsigned long *src2, unsigned int nbits) |
{ |
if (small_const_nbits(nbits)) |
return ! ((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits)); |
257,7 → 257,7 |
return __bitmap_subset(src1, src2, nbits); |
} |
static inline int bitmap_empty(const unsigned long *src, int nbits) |
static inline int bitmap_empty(const unsigned long *src, unsigned nbits) |
{ |
if (small_const_nbits(nbits)) |
return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); |
265,7 → 265,7 |
return __bitmap_empty(src, nbits); |
} |
static inline int bitmap_full(const unsigned long *src, int nbits) |
static inline int bitmap_full(const unsigned long *src, unsigned int nbits) |
{ |
if (small_const_nbits(nbits)) |
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); |
273,7 → 273,7 |
return __bitmap_full(src, nbits); |
} |
static inline int bitmap_weight(const unsigned long *src, int nbits) |
static inline int bitmap_weight(const unsigned long *src, unsigned int nbits) |
{ |
if (small_const_nbits(nbits)) |
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits)); |
284,7 → 284,7 |
const unsigned long *src, int n, int nbits) |
{ |
if (small_const_nbits(nbits)) |
*dst = *src >> n; |
*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> n; |
else |
__bitmap_shift_right(dst, src, n, nbits); |
} |
/drivers/include/linux/bitops.h |
---|
4,12 → 4,23 |
#ifdef __KERNEL__ |
#define BIT(nr) (1UL << (nr)) |
#define BIT_ULL(nr) (1ULL << (nr)) |
#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) |
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) |
#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) |
#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) |
#define BITS_PER_BYTE 8 |
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) |
#endif |
/* |
* Create a contiguous bitmask starting at bit position @l and ending at |
* position @h. For example |
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. |
*/ |
#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) |
#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) |
extern unsigned int __sw_hweight8(unsigned int w); |
extern unsigned int __sw_hweight16(unsigned int w); |
extern unsigned int __sw_hweight32(unsigned int w); |
185,6 → 196,21 |
#ifdef __KERNEL__ |
#ifndef set_mask_bits |
#define set_mask_bits(ptr, _mask, _bits) \ |
({ \ |
const typeof(*ptr) mask = (_mask), bits = (_bits); \ |
typeof(*ptr) old, new; \ |
\ |
do { \ |
old = ACCESS_ONCE(*ptr); \ |
new = (old & ~mask) | bits; \ |
} while (cmpxchg(ptr, old, new) != old); \ |
\ |
new; \ |
}) |
#endif |
#ifndef find_last_bit |
/** |
* find_last_bit - find the last set bit in a memory region |
/drivers/include/linux/bug.h |
---|
57,6 → 57,7 |
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0) |
/* Force a compilation error if a constant expression is not a power of 2 */ |
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ |
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) |
/drivers/include/linux/byteorder/generic.h |
---|
2,7 → 2,7 |
#define _LINUX_BYTEORDER_GENERIC_H |
/* |
* linux/byteorder_generic.h |
* linux/byteorder/generic.h |
* Generic Byte-reordering support |
* |
* The "... p" macros, like le64_to_cpup, can be used with pointers |
/drivers/include/linux/compiler-gcc.h |
---|
37,6 → 37,9 |
__asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ |
(typeof(ptr)) (__ptr + (off)); }) |
/* Make the optimizer believe the variable can be manipulated arbitrarily. */ |
#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) |
#ifdef __CHECKER__ |
#define __must_be_array(arr) 0 |
#else |
50,9 → 53,14 |
*/ |
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ |
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) |
# define inline inline __attribute__((always_inline)) |
# define __inline__ __inline__ __attribute__((always_inline)) |
# define __inline __inline __attribute__((always_inline)) |
# define inline inline __attribute__((always_inline)) notrace |
# define __inline__ __inline__ __attribute__((always_inline)) notrace |
# define __inline __inline __attribute__((always_inline)) notrace |
#else |
/* A lot of inline functions can cause havoc with function tracing */ |
# define inline inline notrace |
# define __inline__ __inline__ notrace |
# define __inline __inline notrace |
#endif |
#define __deprecated __attribute__((deprecated)) |
/drivers/include/linux/compiler-gcc4.h |
---|
75,11 → 75,7 |
* |
* (asm goto is automatically volatile - the naming reflects this.) |
*/ |
#if GCC_VERSION <= 40801 |
# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) |
#else |
# define asm_volatile_goto(x...) do { asm goto(x); } while (0) |
#endif |
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP |
#if GCC_VERSION >= 40400 |
/drivers/include/linux/compiler.h |
---|
63,6 → 63,13 |
# include <linux/compiler-intel.h> |
#endif |
/* Clang compiler defines __GNUC__. So we will overwrite implementations |
* coming from above header files here |
*/ |
#ifdef __clang__ |
#include <linux/compiler-clang.h> |
#endif |
/* |
* Generic compiler-dependent macros required for kernel |
* build go below this comment. Actual compiler/compiler version |
170,6 → 177,10 |
(typeof(ptr)) (__ptr + (off)); }) |
#endif |
#ifndef OPTIMIZER_HIDE_VAR |
#define OPTIMIZER_HIDE_VAR(var) barrier() |
#endif |
/* Not-quite-unique ID. */ |
#ifndef __UNIQUE_ID |
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) |
298,6 → 309,11 |
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) |
#endif |
/* Is this type a native word size -- useful for atomic operations */ |
#ifndef __native_word |
# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) |
#endif |
/* Compile time object size, -1 for unknown */ |
#ifndef __compiletime_object_size |
# define __compiletime_object_size(obj) -1 |
307,9 → 323,18 |
#endif |
#ifndef __compiletime_error |
# define __compiletime_error(message) |
/* |
* Sparse complains of variable sized arrays due to the temporary variable in |
* __compiletime_assert. Unfortunately we can't just expand it out to make |
* sparse see a constant array size without breaking compiletime_assert on old |
* versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether. |
*/ |
# ifndef __CHECKER__ |
# define __compiletime_error_fallback(condition) \ |
do { ((void)sizeof(char[1 - 2 * condition])); } while (0) |
#else |
# endif |
#endif |
#ifndef __compiletime_error_fallback |
# define __compiletime_error_fallback(condition) do { } while (0) |
#endif |
337,6 → 362,10 |
#define compiletime_assert(condition, msg) \ |
_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) |
#define compiletime_assert_atomic_type(t) \ |
compiletime_assert(__native_word(t), \ |
"Need native word sized stores/loads for atomicity.") |
/* |
* Prevent the compiler from merging or refetching accesses. The compiler |
* is also forbidden from reordering successive instances of ACCESS_ONCE(), |
354,7 → 383,9 |
/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ |
#ifdef CONFIG_KPROBES |
# define __kprobes __attribute__((__section__(".kprobes.text"))) |
# define nokprobe_inline __always_inline |
#else |
# define __kprobes |
# define nokprobe_inline inline |
#endif |
#endif /* __LINUX_COMPILER_H */ |
/drivers/include/linux/dma-buf.h |
---|
115,6 → 115,7 |
* @exp_name: name of the exporter; useful for debugging. |
* @list_node: node for dma_buf accounting and debugging. |
* @priv: exporter specific private data for this buffer object. |
* @resv: reservation object linked to this dma-buf |
*/ |
struct dma_buf { |
size_t size; |
168,10 → 169,11 |
struct dma_buf_attachment *dmabuf_attach); |
struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, |
size_t size, int flags, const char *); |
size_t size, int flags, const char *, |
struct reservation_object *); |
#define dma_buf_export(priv, ops, size, flags) \ |
dma_buf_export_named(priv, ops, size, flags, __FILE__) |
#define dma_buf_export(priv, ops, size, flags, resv) \ |
dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv) |
int dma_buf_fd(struct dma_buf *dmabuf, int flags); |
struct dma_buf *dma_buf_get(int fd); |
194,4 → 196,6 |
unsigned long); |
void *dma_buf_vmap(struct dma_buf *); |
void dma_buf_vunmap(struct dma_buf *, void *vaddr); |
int dma_buf_debugfs_create_file(const char *name, |
int (*write)(struct seq_file *)); |
#endif /* __DMA_BUF_H__ */ |
/drivers/include/linux/err.h |
---|
2,12 → 2,13 |
#define _LINUX_ERR_H |
#include <linux/compiler.h> |
#include <linux/types.h> |
#include <errno.h> |
/* |
* Kernel pointers have redundant information, so we can use a |
* scheme where we can return either an error code or a dentry |
* scheme where we can return either an error code or a normal |
* pointer with the same return value. |
* |
* This should be a per-architecture thing, to allow different |
29,12 → 30,12 |
return (long) ptr; |
} |
static inline long __must_check IS_ERR(__force const void *ptr) |
static inline bool __must_check IS_ERR(__force const void *ptr) |
{ |
return IS_ERR_VALUE((unsigned long)ptr); |
} |
static inline long __must_check IS_ERR_OR_NULL(__force const void *ptr) |
static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr) |
{ |
return !ptr || IS_ERR_VALUE((unsigned long)ptr); |
} |
/drivers/include/linux/fb.h |
---|
413,6 → 413,8 |
struct fb_info; |
struct device; |
struct file; |
struct videomode; |
struct device_node; |
/* Definitions below are used in the parsed monitor specs */ |
#define FB_DPMS_ACTIVE_OFF 1 |
439,6 → 441,7 |
#define FB_MISC_PRIM_COLOR 1 |
#define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */ |
#define FB_MISC_HDMI 4 |
struct fb_chroma { |
__u32 redx; /* in fraction of 1024 */ |
__u32 greenx; |
690,6 → 693,10 |
/* teardown any resources to do with this framebuffer */ |
void (*fb_destroy)(struct fb_info *info); |
/* called at KDB enter and leave time to prepare the console */ |
int (*fb_debug_enter)(struct fb_info *info); |
int (*fb_debug_leave)(struct fb_info *info); |
}; |
#ifdef CONFIG_FB_TILEBLITTING |
938,7 → 945,7 |
#define fb_memcpy_fromfb sbus_memcpy_fromio |
#define fb_memcpy_tofb sbus_memcpy_toio |
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) |
#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__) |
#define fb_readb __raw_readb |
#define fb_readw __raw_readw |
999,7 → 1006,7 |
extern int register_framebuffer(struct fb_info *fb_info); |
extern int unregister_framebuffer(struct fb_info *fb_info); |
extern int unlink_framebuffer(struct fb_info *fb_info); |
extern void remove_conflicting_framebuffers(struct apertures_struct *a, |
extern int remove_conflicting_framebuffers(struct apertures_struct *a, |
const char *name, bool primary); |
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); |
extern int fb_show_logo(struct fb_info *fb_info, int rotate); |
1027,7 → 1034,7 |
static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, |
u8 *src, u32 s_pitch, u32 height) |
{ |
int i, j; |
u32 i, j; |
d_pitch -= s_pitch; |
/drivers/include/linux/file.h |
---|
4,4 → 4,5 |
#ifndef __LINUX_FILE_H |
#define __LINUX_FILE_H |
struct file; |
#endif /* __LINUX_FILE_H */ |
/drivers/include/linux/firmware.h |
---|
43,9 → 43,11 |
int request_firmware(const struct firmware **fw, const char *name, |
struct device *device); |
int request_firmware_nowait( |
struct module *module, int uevent, |
const char *name, struct device *device, void *context, |
struct module *module, bool uevent, |
const char *name, struct device *device, gfp_t gfp, void *context, |
void (*cont)(const struct firmware *fw, void *context)); |
int request_firmware_direct(const struct firmware **fw, const char *name, |
struct device *device); |
void release_firmware(const struct firmware *fw); |
/drivers/include/linux/hashtable.h |
---|
0,0 → 1,205 |
/* |
* Statically sized hash table implementation |
* (C) 2012 Sasha Levin <levinsasha928@gmail.com> |
*/ |
#ifndef _LINUX_HASHTABLE_H |
#define _LINUX_HASHTABLE_H |
#include <linux/list.h> |
#include <linux/types.h> |
#include <linux/kernel.h> |
#include <linux/hash.h> |
#include <linux/rculist.h> |
#define DEFINE_HASHTABLE(name, bits) \ |
struct hlist_head name[1 << (bits)] = \ |
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } |
#define DECLARE_HASHTABLE(name, bits) \ |
struct hlist_head name[1 << (bits)] |
#define HASH_SIZE(name) (ARRAY_SIZE(name)) |
#define HASH_BITS(name) ilog2(HASH_SIZE(name)) |
/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ |
#define hash_min(val, bits) \ |
(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) |
/* Initialize all @sz bucket heads of hashtable @ht to empty hlists. */ |
static inline void __hash_init(struct hlist_head *ht, unsigned int sz) |
{ |
unsigned int i; |
for (i = 0; i < sz; i++) |
INIT_HLIST_HEAD(&ht[i]); |
}
/** |
* hash_init - initialize a hash table |
* @hashtable: hashtable to be initialized |
* |
* Calculates the size of the hashtable from the given parameter and |
* initializes every bucket to an empty hlist. |
* |
* This has to be a macro since HASH_BITS() will not work on pointers since |
* it calculates the size during preprocessing. |
*/ |
#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
/** |
* hash_add - add an object to a hashtable |
* @hashtable: hashtable to add to |
* @node: the &struct hlist_node of the object to be added |
* @key: the key of the object to be added |
*/ |
#define hash_add(hashtable, node, key) \ |
hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) |
/** |
* hash_add_rcu - add an object to a rcu enabled hashtable |
* @hashtable: hashtable to add to |
* @node: the &struct hlist_node of the object to be added |
* @key: the key of the object to be added |
*/ |
#define hash_add_rcu(hashtable, node, key) \ |
hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) |
/** |
* hash_hashed - check whether an object is in any hashtable |
* @node: the &struct hlist_node of the object to be checked |
* |
* Return: true if @node is currently linked into some hash bucket. |
*/ |
static inline bool hash_hashed(struct hlist_node *node) |
{ |
return !hlist_unhashed(node); |
}
/* Return true iff every one of the @sz buckets of @ht is an empty hlist. */ |
static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) |
{ |
unsigned int i; |
for (i = 0; i < sz; i++) |
if (!hlist_empty(&ht[i])) |
return false; |
return true; |
}
/** |
* hash_empty - check whether a hashtable is empty |
* @hashtable: hashtable to check |
* |
* This has to be a macro since HASH_BITS() will not work on pointers since |
* it calculates the size during preprocessing. |
*/ |
#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) |
/** |
* hash_del - remove an object from a hashtable |
* @node: &struct hlist_node of the object to remove |
* |
* Uses hlist_del_init(), so hash_hashed() reports false for @node afterwards. |
*/ |
static inline void hash_del(struct hlist_node *node) |
{ |
hlist_del_init(node); |
}
/** |
* hash_del_rcu - remove an object from a rcu enabled hashtable |
* @node: &struct hlist_node of the object to remove |
* |
* Uses hlist_del_init_rcu(), so hash_hashed() reports false for @node |
* afterwards while still allowing concurrent RCU readers to traverse safely. |
*/ |
static inline void hash_del_rcu(struct hlist_node *node) |
{ |
hlist_del_init_rcu(node); |
}
/** |
* hash_for_each - iterate over a hashtable |
* @name: hashtable to iterate |
* @bkt: integer to use as bucket loop cursor |
* @obj: the type * to use as a loop cursor for each entry |
* @member: the name of the hlist_node within the struct |
*/ |
#define hash_for_each(name, bkt, obj, member) \ |
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ |
(bkt)++)\ |
hlist_for_each_entry(obj, &name[bkt], member) |
/** |
* hash_for_each_rcu - iterate over a rcu enabled hashtable |
* @name: hashtable to iterate |
* @bkt: integer to use as bucket loop cursor |
* @obj: the type * to use as a loop cursor for each entry |
* @member: the name of the hlist_node within the struct |
*/ |
#define hash_for_each_rcu(name, bkt, obj, member) \ |
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ |
(bkt)++)\ |
hlist_for_each_entry_rcu(obj, &name[bkt], member) |
/** |
* hash_for_each_safe - iterate over a hashtable safe against removal of |
* hash entry |
* @name: hashtable to iterate |
* @bkt: integer to use as bucket loop cursor |
* @tmp: a &struct used for temporary storage |
* @obj: the type * to use as a loop cursor for each entry |
* @member: the name of the hlist_node within the struct |
*/ |
#define hash_for_each_safe(name, bkt, tmp, obj, member) \ |
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ |
(bkt)++)\ |
hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) |
/** |
* hash_for_each_possible - iterate over all possible objects hashing to the |
* same bucket |
* @name: hashtable to iterate |
* @obj: the type * to use as a loop cursor for each entry |
* @member: the name of the hlist_node within the struct |
* @key: the key of the objects to iterate over |
*/ |
#define hash_for_each_possible(name, obj, member, key) \ |
hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) |
/** |
* hash_for_each_possible_rcu - iterate over all possible objects hashing to the |
* same bucket in an rcu enabled hashtable |
* @name: hashtable to iterate |
* @obj: the type * to use as a loop cursor for each entry |
* @member: the name of the hlist_node within the struct |
* @key: the key of the objects to iterate over |
*/ |
#define hash_for_each_possible_rcu(name, obj, member, key) \ |
hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\ |
member)
/** |
* hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing |
* to the same bucket in an rcu enabled hashtable |
* @name: hashtable to iterate |
* @obj: the type * to use as a loop cursor for each entry |
* @member: the name of the hlist_node within the struct |
* @key: the key of the objects to iterate over |
* |
* This is the same as hash_for_each_possible_rcu() except that it does |
* not do any RCU debugging or tracing. |
*/ |
#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \ |
hlist_for_each_entry_rcu_notrace(obj, \ |
&name[hash_min(key, HASH_BITS(name))], member)
/** |
* hash_for_each_possible_safe - iterate over all possible objects hashing to the |
* same bucket safe against removals |
* @name: hashtable to iterate |
* @obj: the type * to use as a loop cursor for each entry |
* @tmp: a &struct used for temporary storage |
* @member: the name of the hlist_node within the struct |
* @key: the key of the objects to iterate over |
*/ |
#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ |
hlist_for_each_entry_safe(obj, tmp,\ |
&name[hash_min(key, HASH_BITS(name))], member) |
#endif |
/drivers/include/linux/hdmi.h |
---|
262,6 → 262,18 |
struct hdmi_vendor_infoframe hdmi; |
}; |
/** |
* union hdmi_infoframe - overall union of all abstract infoframe representations |
* @any: generic infoframe |
* @avi: avi infoframe |
* @spd: spd infoframe |
* @vendor: union of all vendor infoframes |
* @audio: audio infoframe |
* |
* This is used by the generic pack function. This works since all infoframes |
* have the same header which also indicates which type of infoframe should be |
* packed. |
*/ |
union hdmi_infoframe { |
struct hdmi_any_infoframe any; |
struct hdmi_avi_infoframe avi; |
/drivers/include/linux/i2c.h |
---|
135,7 → 135,6 |
* @name: Indicates the type of the device, usually a chip name that's |
* generic enough to hide second-sourcing and compatible revisions. |
* @adapter: manages the bus segment hosting this I2C device |
* @driver: device's driver, hence pointer to access routines |
* @dev: Driver model device node for the slave. |
* @irq: indicates the IRQ generated by this device (if any) |
* @detected: member of an i2c_driver.clients list or i2c-core's |
152,7 → 151,6 |
/* _LOWER_ 7 bits */ |
char name[I2C_NAME_SIZE]; |
struct i2c_adapter *adapter; /* the adapter we sit on */ |
struct i2c_driver *driver; /* and our access routines */ |
struct device dev; /* the device structure */ |
int irq; /* irq issued by device */ |
struct list_head detected; |
160,6 → 158,7 |
#define to_i2c_client(d) container_of(d, struct i2c_client, dev) |
extern struct i2c_client *i2c_verify_client(struct device *dev); |
extern struct i2c_adapter *i2c_verify_adapter(struct device *dev); |
/** |
* struct i2c_board_info - template for device creation |
209,6 → 208,10 |
* i2c_algorithm is the interface to a class of hardware solutions which can |
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584 |
* to name two of the most common. |
* |
* The return codes from the @master_xfer field should indicate the type of |
* error code that occurred during the transfer, as documented in the kernel |
* Documentation file Documentation/i2c/fault-codes. |
*/ |
struct i2c_algorithm { |
/* If an adapter algorithm can't do I2C-level access, set master_xfer |
275,6 → 278,7 |
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */ |
#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ |
#define I2C_CLASS_SPD (1<<7) /* Memory modules */ |
#define I2C_CLASS_DEPRECATED (1<<8) /* Warn users that adapter will stop using classes */ |
/* Internal numbers to terminate lists */ |
#define I2C_CLIENT_END 0xfffeU |
/drivers/include/linux/idr.h |
---|
35,21 → 35,24 |
struct idr_layer { |
int prefix; /* the ID prefix of this idr_layer */ |
DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */ |
int layer; /* distance from leaf */ |
struct idr_layer __rcu *ary[1<<IDR_BITS]; |
int count; /* When zero, we can release it */ |
int layer; /* distance from leaf */ |
union { |
/* A zero bit means "space here" */ |
DECLARE_BITMAP(bitmap, IDR_SIZE); |
struct rcu_head rcu_head; |
}; |
}; |
struct idr { |
struct idr_layer __rcu *hint; /* the last layer allocated from */ |
struct idr_layer __rcu *top; |
struct idr_layer *id_free; |
int layers; /* only valid w/o concurrent changes */ |
int id_free_cnt; |
int cur; /* current pos for cyclic allocation */ |
spinlock_t lock; |
int id_free_cnt; |
struct idr_layer *id_free; |
}; |
#define IDR_INIT(name) \ |
88,9 → 91,9 |
void *idr_get_next(struct idr *idp, int *nextid); |
void *idr_replace(struct idr *idp, void *ptr, int id); |
void idr_remove(struct idr *idp, int id); |
void idr_free(struct idr *idp, int id); |
void idr_destroy(struct idr *idp); |
void idr_init(struct idr *idp); |
bool idr_is_empty(struct idr *idp); |
/** |
* idr_preload_end - end preload section started with idr_preload() |
139,69 → 142,6 |
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id) |
/* |
* Don't use the following functions. These exist only to suppress |
* deprecated warnings on EXPORT_SYMBOL()s. |
*/ |
int __idr_pre_get(struct idr *idp, gfp_t gfp_mask); |
int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); |
void __idr_remove_all(struct idr *idp); |
/** |
* idr_pre_get - reserve resources for idr allocation |
* @idp: idr handle |
* @gfp_mask: memory allocation flags |
* |
* Part of old alloc interface. This is going away. Use |
* idr_preload[_end]() and idr_alloc() instead. |
*/ |
static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask) |
{ |
return __idr_pre_get(idp, gfp_mask); |
} |
/** |
* idr_get_new_above - allocate new idr entry above or equal to a start id |
* @idp: idr handle |
* @ptr: pointer you want associated with the id |
* @starting_id: id to start search at |
* @id: pointer to the allocated handle |
* |
* Part of old alloc interface. This is going away. Use |
* idr_preload[_end]() and idr_alloc() instead. |
*/ |
static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr, |
int starting_id, int *id) |
{ |
return __idr_get_new_above(idp, ptr, starting_id, id); |
} |
/** |
* idr_get_new - allocate new idr entry |
* @idp: idr handle |
* @ptr: pointer you want associated with the id |
* @id: pointer to the allocated handle |
* |
* Part of old alloc interface. This is going away. Use |
* idr_preload[_end]() and idr_alloc() instead. |
*/ |
static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id) |
{ |
return __idr_get_new_above(idp, ptr, 0, id); |
} |
/** |
* idr_remove_all - remove all ids from the given idr tree |
* @idp: idr handle |
* |
* If you're trying to destroy @idp, calling idr_destroy() is enough. |
* This is going away. Don't use. |
*/ |
static inline void __deprecated idr_remove_all(struct idr *idp) |
{ |
__idr_remove_all(idp); |
} |
/* |
* IDA - IDR based id allocator, use when translation from id to |
* pointer isn't necessary. |
* |
/drivers/include/linux/interval_tree.h |
---|
0,0 → 1,27 |
#ifndef _LINUX_INTERVAL_TREE_H |
#define _LINUX_INTERVAL_TREE_H |
#include <linux/rbtree.h> |
/* A node in an augmented-rbtree interval tree; [start, last] is inclusive. */ |
struct interval_tree_node { |
struct rb_node rb; |
unsigned long start; /* Start of interval */ |
unsigned long last; /* Last location _in_ interval */ |
unsigned long __subtree_last; /* max 'last' in this subtree; maintained by the tree code */ |
};
extern void |
interval_tree_insert(struct interval_tree_node *node, struct rb_root *root); |
extern void |
interval_tree_remove(struct interval_tree_node *node, struct rb_root *root); |
extern struct interval_tree_node * |
interval_tree_iter_first(struct rb_root *root, |
unsigned long start, unsigned long last); |
extern struct interval_tree_node * |
interval_tree_iter_next(struct interval_tree_node *node, |
unsigned long start, unsigned long last); |
#endif /* _LINUX_INTERVAL_TREE_H */ |
/drivers/include/linux/interval_tree_generic.h |
---|
0,0 → 1,191 |
/* |
Interval Trees |
(C) 2012 Michel Lespinasse <walken@google.com> |
This program is free software; you can redistribute it and/or modify |
it under the terms of the GNU General Public License as published by |
the Free Software Foundation; either version 2 of the License, or |
(at your option) any later version. |
This program is distributed in the hope that it will be useful, |
but WITHOUT ANY WARRANTY; without even the implied warranty of |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
GNU General Public License for more details. |
You should have received a copy of the GNU General Public License |
along with this program; if not, write to the Free Software |
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
include/linux/interval_tree_generic.h |
*/ |
#include <linux/rbtree_augmented.h> |
/* |
* Template for implementing interval trees |
* |
* ITSTRUCT: struct type of the interval tree nodes |
* ITRB: name of struct rb_node field within ITSTRUCT |
* ITTYPE: type of the interval endpoints |
* ITSUBTREE: name of ITTYPE field within ITSTRUCT holding last-in-subtree |
* ITSTART(n): start endpoint of ITSTRUCT node n |
* ITLAST(n): last endpoint of ITSTRUCT node n |
* ITSTATIC: 'static' or empty |
* ITPREFIX: prefix to use for the inline tree definitions |
* |
* The ITSUBTREE field caches the maximum ITLAST over each node's subtree; |
* the search routines compare the query 'start' against it to prune whole |
* branches that cannot intersect the query interval. |
* |
* Note - before using this, please consider if the non-generic version |
* (interval_tree.h) would work for you... |
*/ |
#define INTERVAL_TREE_DEFINE(ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, \ |
ITSTART, ITLAST, ITSTATIC, ITPREFIX) \ |
\ |
/* Callbacks for augmented rbtree insert and remove */ \ |
\ |
static inline ITTYPE ITPREFIX ## _compute_subtree_last(ITSTRUCT *node) \ |
{ \ |
ITTYPE max = ITLAST(node), subtree_last; \ |
if (node->ITRB.rb_left) { \ |
subtree_last = rb_entry(node->ITRB.rb_left, \ |
ITSTRUCT, ITRB)->ITSUBTREE; \ |
if (max < subtree_last) \ |
max = subtree_last; \ |
} \ |
if (node->ITRB.rb_right) { \ |
subtree_last = rb_entry(node->ITRB.rb_right, \ |
ITSTRUCT, ITRB)->ITSUBTREE; \ |
if (max < subtree_last) \ |
max = subtree_last; \ |
} \ |
return max; \ |
} \ |
\ |
RB_DECLARE_CALLBACKS(static, ITPREFIX ## _augment, ITSTRUCT, ITRB, \ |
ITTYPE, ITSUBTREE, ITPREFIX ## _compute_subtree_last) \ |
\ |
/* Insert / remove interval nodes from the tree */ \ |
\ |
ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, struct rb_root *root) \ |
{ \ |
struct rb_node **link = &root->rb_node, *rb_parent = NULL; \ |
ITTYPE start = ITSTART(node), last = ITLAST(node); \ |
ITSTRUCT *parent; \ |
\ |
while (*link) { \ |
rb_parent = *link; \ |
parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \ |
if (parent->ITSUBTREE < last) \ |
parent->ITSUBTREE = last; \ |
if (start < ITSTART(parent)) \ |
link = &parent->ITRB.rb_left; \ |
else \ |
link = &parent->ITRB.rb_right; \ |
} \ |
\ |
node->ITSUBTREE = last; \ |
rb_link_node(&node->ITRB, rb_parent, link); \ |
rb_insert_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \ |
} \ |
\ |
ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, struct rb_root *root) \ |
{ \ |
rb_erase_augmented(&node->ITRB, root, &ITPREFIX ## _augment); \ |
} \ |
\ |
/* \ |
* Iterate over intervals intersecting [start;last] \ |
* \ |
* Note that a node's interval intersects [start;last] iff: \ |
* Cond1: ITSTART(node) <= last \ |
* and \ |
* Cond2: start <= ITLAST(node) \ |
*/ \ |
\ |
static ITSTRUCT * \ |
ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ |
{ \ |
while (true) { \ |
/* \ |
* Loop invariant: start <= node->ITSUBTREE \ |
* (Cond2 is satisfied by one of the subtree nodes) \ |
*/ \ |
if (node->ITRB.rb_left) { \ |
ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \ |
ITSTRUCT, ITRB); \ |
if (start <= left->ITSUBTREE) { \ |
/* \ |
* Some nodes in left subtree satisfy Cond2. \ |
* Iterate to find the leftmost such node N. \ |
* If it also satisfies Cond1, that's the \ |
* match we are looking for. Otherwise, there \ |
* is no matching interval as nodes to the \ |
* right of N can't satisfy Cond1 either. \ |
*/ \ |
node = left; \ |
continue; \ |
} \ |
} \ |
if (ITSTART(node) <= last) { /* Cond1 */ \ |
if (start <= ITLAST(node)) /* Cond2 */ \ |
return node; /* node is leftmost match */ \ |
if (node->ITRB.rb_right) { \ |
node = rb_entry(node->ITRB.rb_right, \ |
ITSTRUCT, ITRB); \ |
if (start <= node->ITSUBTREE) \ |
continue; \ |
} \ |
} \ |
return NULL; /* No match */ \ |
} \ |
} \ |
\ |
ITSTATIC ITSTRUCT * \ |
ITPREFIX ## _iter_first(struct rb_root *root, ITTYPE start, ITTYPE last) \ |
{ \ |
ITSTRUCT *node; \ |
\ |
if (!root->rb_node) \ |
return NULL; \ |
node = rb_entry(root->rb_node, ITSTRUCT, ITRB); \ |
if (node->ITSUBTREE < start) \ |
return NULL; \ |
return ITPREFIX ## _subtree_search(node, start, last); \ |
} \ |
\ |
ITSTATIC ITSTRUCT * \ |
ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \ |
{ \ |
struct rb_node *rb = node->ITRB.rb_right, *prev; \ |
\ |
while (true) { \ |
/* \ |
* Loop invariants: \ |
* Cond1: ITSTART(node) <= last \ |
* rb == node->ITRB.rb_right \ |
* \ |
* First, search right subtree if suitable \ |
*/ \ |
if (rb) { \ |
ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \ |
if (start <= right->ITSUBTREE) \ |
return ITPREFIX ## _subtree_search(right, \ |
start, last); \ |
} \ |
\ |
/* Move up the tree until we come from a node's left child */ \ |
do { \ |
rb = rb_parent(&node->ITRB); \ |
if (!rb) \ |
return NULL; \ |
prev = &node->ITRB; \ |
node = rb_entry(rb, ITSTRUCT, ITRB); \ |
rb = node->ITRB.rb_right; \ |
} while (prev == rb); \ |
\ |
/* Check if the node intersects [start;last] */ \ |
if (last < ITSTART(node)) /* !Cond1 */ \ |
return NULL; \ |
else if (start <= ITLAST(node)) /* Cond2 */ \ |
return node; \ |
} \ |
} |
/drivers/include/linux/ioport.h |
---|
57,7 → 57,7 |
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ |
#define IORESOURCE_DISABLED 0x10000000 |
#define IORESOURCE_UNSET 0x20000000 |
#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */ |
#define IORESOURCE_AUTO 0x40000000 |
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ |
/drivers/include/linux/irqreturn.h |
---|
14,6 → 14,6 |
}; |
typedef enum irqreturn irqreturn_t; |
#define IRQ_RETVAL(x) ((x) != IRQ_NONE) |
#define IRQ_RETVAL(x) ((x) ? IRQ_HANDLED : IRQ_NONE) |
#endif |
/drivers/include/linux/jiffies.h |
---|
76,10 → 76,18 |
* The 64-bit value is not atomic - you MUST NOT read it |
* without sampling the sequence number in jiffies_lock. |
* get_jiffies_64() will do this for you as appropriate. |
*/ |
extern u64 jiffies_64; |
extern unsigned long volatile jiffies; |
#if (BITS_PER_LONG < 64) |
u64 get_jiffies_64(void); |
#else |
static inline u64 get_jiffies_64(void) |
{ |
return (u64)GetTimerTicks(); |
return (u64)jiffies; |
} |
#endif |
/* |
* These inlines deal with timer wrapping correctly. You are |
290,6 → 298,12 |
*/ |
extern unsigned int jiffies_to_msecs(const unsigned long j); |
extern unsigned int jiffies_to_usecs(const unsigned long j); |
static inline u64 jiffies_to_nsecs(const unsigned long j) |
{ |
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC; |
} |
extern unsigned long msecs_to_jiffies(const unsigned int m); |
extern unsigned long usecs_to_jiffies(const unsigned int u); |
extern unsigned long timespec_to_jiffies(const struct timespec *value); |
/drivers/include/linux/kernel.h |
---|
31,6 → 31,19 |
#define ULLONG_MAX (~0ULL) |
#define SIZE_MAX (~(size_t)0) |
#define U8_MAX ((u8)~0U) |
#define S8_MAX ((s8)(U8_MAX>>1)) |
#define S8_MIN ((s8)(-S8_MAX - 1)) |
#define U16_MAX ((u16)~0U) |
#define S16_MAX ((s16)(U16_MAX>>1)) |
#define S16_MIN ((s16)(-S16_MAX - 1)) |
#define U32_MAX ((u32)~0U) |
#define S32_MAX ((s32)(U32_MAX>>1)) |
#define S32_MIN ((s32)(-S32_MAX - 1)) |
#define U64_MAX ((u64)~0ULL) |
#define S64_MAX ((s64)(U64_MAX>>1)) |
#define S64_MIN ((s64)(-S64_MAX - 1)) |
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) |
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) |
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) |
126,6 → 139,13 |
*/ |
#define lower_32_bits(n) ((u32)(n)) |
#define abs64(x) ({ \ |
s64 __x = (x); \ |
(__x < 0) ? -__x : __x; \ |
}) |
#define KERN_EMERG "<0>" /* system is unusable */ |
#define KERN_ALERT "<1>" /* action must be taken immediately */ |
#define KERN_CRIT "<2>" /* critical conditions */ |
159,6 → 179,9 |
#define printk(fmt, arg...) dbgprintf(fmt , ##arg) |
extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...); |
extern __printf(2, 3) |
char *kasprintf(gfp_t gfp, const char *fmt, ...); |
/* |
* min()/max()/clamp() macros that also do |
493,5 → 516,35 |
}) |
static inline __must_check long __copy_to_user(void __user *to, |
const void *from, unsigned long n) |
{ |
if (__builtin_constant_p(n)) { |
switch(n) { |
case 1: |
*(u8 __force *)to = *(u8 *)from; |
return 0; |
case 2: |
*(u16 __force *)to = *(u16 *)from; |
return 0; |
case 4: |
*(u32 __force *)to = *(u32 *)from; |
return 0; |
#ifdef CONFIG_64BIT |
case 8: |
*(u64 __force *)to = *(u64 *)from; |
return 0; |
#endif |
default: |
break; |
} |
} |
memcpy((void __force *)to, from, n); |
return 0; |
} |
struct seq_file; |
#endif |
/drivers/include/linux/kgdb.h |
---|
0,0 → 1,24 |
#ifndef _KDB_H |
#define _KDB_H |
/* |
* Kernel Debugger Architecture Independent Global Headers |
* |
* This file is subject to the terms and conditions of the GNU General Public |
* License. See the file "COPYING" in the main directory of this archive |
* for more details. |
* |
* Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved. |
* Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com> |
* Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com> |
*/ |
/* Repeat behaviour of a kdb command when re-invoked without new input. */ |
typedef enum { |
KDB_REPEAT_NONE = 0, /* Do not repeat this command */ |
KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ |
KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ |
} kdb_repeat_t; |
/* kdb command handler: takes (argc, argv); returns an int status code. */ |
typedef int (*kdb_func_t)(int, const char **);
#endif /* !_KDB_H */ |
/drivers/include/linux/kobject.h |
---|
31,8 → 31,10 |
#define UEVENT_NUM_ENVP 32 /* number of env pointers */ |
#define UEVENT_BUFFER_SIZE 2048 /* buffer for the variables */ |
#ifdef CONFIG_UEVENT_HELPER |
/* path to the userspace helper executed on an event */ |
extern char uevent_helper[]; |
#endif |
/* counter to tag the uevent, read only except for the kobject core */ |
extern u64 uevent_seqnum; |
65,6 → 67,9 |
struct kobj_type *ktype; |
// struct sysfs_dirent *sd; |
struct kref kref; |
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE |
struct delayed_work release; |
#endif |
unsigned int state_initialized:1; |
unsigned int state_in_sysfs:1; |
unsigned int state_add_uevent_sent:1; |
103,6 → 108,7 |
extern struct kobject *kobject_get(struct kobject *kobj); |
extern void kobject_put(struct kobject *kobj); |
extern const void *kobject_namespace(struct kobject *kobj); |
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); |
struct kobj_type { |
/drivers/include/linux/list.h |
---|
361,6 → 361,17 |
list_entry((ptr)->next, type, member) |
/** |
* list_last_entry - get the last element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
* @member: the name of the list_struct within the struct. |
* |
* Note, that list is expected to be not empty. |
*/ |
#define list_last_entry(ptr, type, member) \ |
list_entry((ptr)->prev, type, member) |
/** |
* list_first_entry_or_null - get the first element from a list |
* @ptr: the list head to take the element from. |
* @type: the type of the struct this is embedded in. |
372,6 → 383,22 |
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) |
/** |
* list_next_entry - get the next element in list |
* @pos: the type * to cursor |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_next_entry(pos, member) \ |
list_entry((pos)->member.next, typeof(*(pos)), member) |
/** |
* list_prev_entry - get the prev element in list |
* @pos: the type * to cursor |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_prev_entry(pos, member) \ |
list_entry((pos)->member.prev, typeof(*(pos)), member) |
/** |
* list_for_each - iterate over a list |
* @pos: the &struct list_head to use as a loop cursor. |
* @head: the head for your list. |
415,9 → 442,9 |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_for_each_entry(pos, head, member) \ |
for (pos = list_entry((head)->next, typeof(*pos), member); \ |
for (pos = list_first_entry(head, typeof(*pos), member); \ |
&pos->member != (head); \ |
pos = list_entry(pos->member.next, typeof(*pos), member)) |
pos = list_next_entry(pos, member)) |
/** |
* list_for_each_entry_reverse - iterate backwards over list of given type. |
426,9 → 453,9 |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_for_each_entry_reverse(pos, head, member) \ |
for (pos = list_entry((head)->prev, typeof(*pos), member); \ |
for (pos = list_last_entry(head, typeof(*pos), member); \ |
&pos->member != (head); \ |
pos = list_entry(pos->member.prev, typeof(*pos), member)) |
pos = list_prev_entry(pos, member)) |
/** |
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() |
451,9 → 478,9 |
* the current position. |
*/ |
#define list_for_each_entry_continue(pos, head, member) \ |
for (pos = list_entry(pos->member.next, typeof(*pos), member); \ |
for (pos = list_next_entry(pos, member); \ |
&pos->member != (head); \ |
pos = list_entry(pos->member.next, typeof(*pos), member)) |
pos = list_next_entry(pos, member)) |
/** |
* list_for_each_entry_continue_reverse - iterate backwards from the given point |
465,9 → 492,9 |
* the current position. |
*/ |
#define list_for_each_entry_continue_reverse(pos, head, member) \ |
for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ |
for (pos = list_prev_entry(pos, member); \ |
&pos->member != (head); \ |
pos = list_entry(pos->member.prev, typeof(*pos), member)) |
pos = list_prev_entry(pos, member)) |
/** |
* list_for_each_entry_from - iterate over list of given type from the current point |
479,7 → 506,7 |
*/ |
#define list_for_each_entry_from(pos, head, member) \ |
for (; &pos->member != (head); \ |
pos = list_entry(pos->member.next, typeof(*pos), member)) |
pos = list_next_entry(pos, member)) |
/** |
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry |
489,10 → 516,10 |
* @member: the name of the list_struct within the struct. |
*/ |
#define list_for_each_entry_safe(pos, n, head, member) \ |
for (pos = list_entry((head)->next, typeof(*pos), member), \ |
n = list_entry(pos->member.next, typeof(*pos), member); \ |
for (pos = list_first_entry(head, typeof(*pos), member), \ |
n = list_next_entry(pos, member); \ |
&pos->member != (head); \ |
pos = n, n = list_entry(n->member.next, typeof(*n), member)) |
pos = n, n = list_next_entry(n, member)) |
/** |
* list_for_each_entry_safe_continue - continue list iteration safe against removal |
505,10 → 532,10 |
* safe against removal of list entry. |
*/ |
#define list_for_each_entry_safe_continue(pos, n, head, member) \ |
for (pos = list_entry(pos->member.next, typeof(*pos), member), \ |
n = list_entry(pos->member.next, typeof(*pos), member); \ |
for (pos = list_next_entry(pos, member), \ |
n = list_next_entry(pos, member); \ |
&pos->member != (head); \ |
pos = n, n = list_entry(n->member.next, typeof(*n), member)) |
pos = n, n = list_next_entry(n, member)) |
/** |
* list_for_each_entry_safe_from - iterate over list from current point safe against removal |
521,9 → 548,9 |
* removal of list entry. |
*/ |
#define list_for_each_entry_safe_from(pos, n, head, member) \ |
for (n = list_entry(pos->member.next, typeof(*pos), member); \ |
for (n = list_next_entry(pos, member); \ |
&pos->member != (head); \ |
pos = n, n = list_entry(n->member.next, typeof(*n), member)) |
pos = n, n = list_next_entry(n, member)) |
/** |
* list_for_each_entry_safe_reverse - iterate backwards over list safe against removal |
536,10 → 563,10 |
* of list entry. |
*/ |
#define list_for_each_entry_safe_reverse(pos, n, head, member) \ |
for (pos = list_entry((head)->prev, typeof(*pos), member), \ |
n = list_entry(pos->member.prev, typeof(*pos), member); \ |
for (pos = list_last_entry(head, typeof(*pos), member), \ |
n = list_prev_entry(pos, member); \ |
&pos->member != (head); \ |
pos = n, n = list_entry(n->member.prev, typeof(*n), member)) |
pos = n, n = list_prev_entry(n, member)) |
/** |
* list_safe_reset_next - reset a stale list_for_each_entry_safe loop |
554,7 → 581,7 |
* completing the current iteration of the loop body. |
*/ |
#define list_safe_reset_next(pos, n, member) \ |
n = list_entry(pos->member.next, typeof(*pos), member) |
n = list_next_entry(pos, member) |
/* |
* Double linked lists with a single pointer list head. |
626,15 → 653,15 |
*(n->pprev) = n; |
} |
static inline void hlist_add_after(struct hlist_node *n, |
struct hlist_node *next) |
static inline void hlist_add_behind(struct hlist_node *n, |
struct hlist_node *prev) |
{ |
next->next = n->next; |
n->next = next; |
next->pprev = &n->next; |
n->next = prev->next; |
prev->next = n; |
n->pprev = &prev->next; |
if(next->next) |
next->next->pprev = &next->next; |
if (n->next) |
n->next->pprev = &n->next; |
} |
/* after that we'll appear to be on some hlist and hlist_del will work */ |
/drivers/include/linux/lockdep.h |
---|
228,9 → 228,9 |
unsigned int trylock:1; /* 16 bits */ |
unsigned int read:2; /* see lock_acquire() comment */ |
unsigned int check:2; /* see lock_acquire() comment */ |
unsigned int check:1; /* see lock_acquire() comment */ |
unsigned int hardirqs_off:1; |
unsigned int references:11; /* 32 bits */ |
unsigned int references:12; /* 32 bits */ |
}; |
/* |
241,7 → 241,7 |
extern void lockdep_reset(void); |
extern void lockdep_reset_lock(struct lockdep_map *lock); |
extern void lockdep_free_key_range(void *start, unsigned long size); |
extern void lockdep_sys_exit(void); |
extern asmlinkage void lockdep_sys_exit(void); |
extern void lockdep_off(void); |
extern void lockdep_on(void); |
279,7 → 279,7 |
(lock)->dep_map.key, sub) |
#define lockdep_set_novalidate_class(lock) \ |
lockdep_set_class(lock, &__lockdep_no_validate__) |
lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock) |
/* |
* Compare locking classes |
*/ |
302,9 → 302,8 |
* |
* Values for check: |
* |
* 0: disabled |
* 1: simple checks (freeing, held-at-exit-time, etc.) |
* 2: full validation |
* 0: simple checks (freeing, held-at-exit-time, etc.) |
* 1: full validation |
*/ |
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, |
int trylock, int read, int check, |
335,10 → 334,14 |
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) |
#define lockdep_assert_held(l) WARN_ON(debug_locks && !lockdep_is_held(l)) |
#define lockdep_assert_held(l) do { \ |
WARN_ON(debug_locks && !lockdep_is_held(l)); \ |
} while (0) |
#else /* !LOCKDEP */ |
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) |
#else /* !CONFIG_LOCKDEP */ |
static inline void lockdep_off(void) |
{ |
} |
384,7 → 387,7 |
#define lockdep_depth(tsk) (0) |
#define lockdep_assert_held(l) do { } while (0) |
#define lockdep_assert_held(l) do { (void)(l); } while (0) |
#define lockdep_recursing(tsk) (0) |
532,13 → 535,13 |
# define might_lock(lock) \ |
do { \ |
typecheck(struct lockdep_map *, &(lock)->dep_map); \ |
lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \ |
lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \ |
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ |
} while (0) |
# define might_lock_read(lock) \ |
do { \ |
typecheck(struct lockdep_map *, &(lock)->dep_map); \ |
lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \ |
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \ |
lock_release(&(lock)->dep_map, 0, _THIS_IP_); \ |
} while (0) |
#else |
/drivers/include/linux/math64.h |
---|
133,4 → 133,34 |
return ret; |
} |
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) |
#ifndef mul_u64_u32_shr |
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) |
{ |
return (u64)(((unsigned __int128)a * mul) >> shift); |
} |
#endif /* mul_u64_u32_shr */ |
#else |
#ifndef mul_u64_u32_shr |
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) |
{ |
u32 ah, al; |
u64 ret; |
al = a; |
ah = a >> 32; |
ret = ((u64)al * mul) >> shift; |
if (ah) |
ret += ((u64)ah * mul) << (32 - shift); |
return ret; |
} |
#endif /* mul_u64_u32_shr */ |
#endif |
#endif /* _LINUX_MATH64_H */ |
/drivers/include/linux/mod_devicetable.h |
---|
431,6 → 431,14 |
kernel_ulong_t driver_data; /* Data private to the driver */ |
}; |
#define SPMI_NAME_SIZE 32 |
#define SPMI_MODULE_PREFIX "spmi:" |
struct spmi_device_id { |
char name[SPMI_NAME_SIZE]; |
kernel_ulong_t driver_data; /* Data private to the driver */ |
}; |
/* dmi */ |
enum dmi_field { |
DMI_NONE, |
547,6 → 555,11 |
* See documentation of "x86_match_cpu" for details. |
*/ |
/* |
* MODULE_DEVICE_TABLE expects this struct to be called x86cpu_device_id. |
* Although gcc seems to ignore this error, clang fails without this define. |
*/ |
#define x86cpu_device_id x86_cpu_id |
struct x86_cpu_id { |
__u16 vendor; |
__u16 family; |
563,6 → 576,15 |
#define X86_MODEL_ANY 0 |
#define X86_FEATURE_ANY 0 /* Same as FPU, you can't test for that */ |
/* |
* Generic table type for matching CPU features. |
* @feature: the bit number of the feature (0 - 65535) |
*/ |
struct cpu_feature { |
__u16 feature; |
}; |
#define IPACK_ANY_FORMAT 0xff |
#define IPACK_ANY_ID (~0) |
struct ipack_device_id { |
598,4 → 620,9 |
__u16 asm_did, asm_vid; |
}; |
struct mcb_device_id { |
__u16 device; |
kernel_ulong_t driver_data; |
}; |
#endif /* LINUX_MOD_DEVICETABLE_H */ |
/drivers/include/linux/mutex.h |
---|
92,4 → 92,16 |
return atomic_read(&lock->count) != 1; |
} |
static inline int mutex_trylock(struct mutex *lock) |
{ |
if (likely(atomic_cmpxchg(&lock->count, 1, 0) == 1)) |
return 1; |
return 0; |
} |
static inline void mutex_destroy(struct mutex *lock) |
{ |
}; |
#endif |
/drivers/include/linux/pci.h |
---|
456,6 → 456,7 |
(pci_resource_end((dev), (bar)) - \ |
pci_resource_start((dev), (bar)) + 1)) |
#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ |
struct pci_bus { |
struct list_head node; /* node in list of buses */ |
480,7 → 481,7 |
char name[48]; |
unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ |
pci_bus_flags_t bus_flags; /* Inherited by child busses */ |
pci_bus_flags_t bus_flags; /* inherited by child buses */ |
struct device *bridge; |
struct device dev; |
struct bin_attribute *legacy_io; /* legacy I/O for this bus */ |
508,8 → 509,12 |
#define to_pci_bus(n) container_of(n, struct pci_bus, dev) |
/* |
* Returns true if the pci bus is root (behind host-pci bridge), |
* Returns true if the PCI bus is root (behind host-PCI bridge), |
* false otherwise |
* |
* Some code assumes that "bus->self == NULL" means that bus is a root bus. |
* This is incorrect because "virtual" buses added for SR-IOV (via |
* virtfn_add_bus()) have "bus->self == NULL" but are not root buses. |
*/ |
static inline bool pci_is_root_bus(struct pci_bus *pbus) |
{ |
531,6 → 536,32 |
#define PCIBIOS_SET_FAILED 0x88 |
#define PCIBIOS_BUFFER_TOO_SMALL 0x89 |
/* |
* Translate above to generic errno for passing back through non-PCI code. |
*/ |
static inline int pcibios_err_to_errno(int err) |
{ |
if (err <= PCIBIOS_SUCCESSFUL) |
return err; /* Assume already errno */ |
switch (err) { |
case PCIBIOS_FUNC_NOT_SUPPORTED: |
return -ENOENT; |
case PCIBIOS_BAD_VENDOR_ID: |
return -EINVAL; |
case PCIBIOS_DEVICE_NOT_FOUND: |
return -ENODEV; |
case PCIBIOS_BAD_REGISTER_NUMBER: |
return -EFAULT; |
case PCIBIOS_SET_FAILED: |
return -EIO; |
case PCIBIOS_BUFFER_TOO_SMALL: |
return -ENOSPC; |
} |
return -ENOTTY; |
} |
/* Low-level architecture-dependent routines */ |
struct pci_ops { |
586,7 → 617,7 |
* pci_is_pcie - check if the PCI device is PCI Express capable |
* @dev: PCI device |
* |
* Retrun true if the PCI device is PCI Express capable, false otherwise. |
* Returns: true if the PCI device is PCI Express capable, false otherwise. |
*/ |
static inline bool pci_is_pcie(struct pci_dev *dev) |
{ |
672,6 → 703,11 |
#define pci_name(x) "radeon" |
static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar) |
{ |
return pdev->resource[bar].start; |
} |
#endif //__PCI__H__ |
/drivers/include/linux/rbtree.h |
---|
85,6 → 85,11 |
*rb_link = node; |
} |
#define rb_entry_safe(ptr, type, member) \ |
({ typeof(ptr) ____ptr = (ptr); \ |
____ptr ? rb_entry(____ptr, type, member) : NULL; \ |
}) |
/** |
* rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of |
* given type safe against removal of rb_node entry |
95,12 → 100,9 |
* @field: the name of the rb_node field within 'type'. |
*/ |
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ |
for (pos = rb_entry(rb_first_postorder(root), typeof(*pos), field),\ |
n = rb_entry(rb_next_postorder(&pos->field), \ |
typeof(*pos), field); \ |
&pos->field; \ |
pos = n, \ |
n = rb_entry(rb_next_postorder(&pos->field), \ |
typeof(*pos), field)) |
for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \ |
pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \ |
typeof(*pos), field); 1; }); \ |
pos = n) |
#endif /* _LINUX_RBTREE_H */ |
/drivers/include/linux/rculist.h |
---|
40,7 → 40,7 |
next->prev = new; |
} |
#else |
extern void __list_add_rcu(struct list_head *new, |
void __list_add_rcu(struct list_head *new, |
struct list_head *prev, struct list_head *next); |
#endif |
191,7 → 191,11 |
if (list_empty(list)) |
return; |
/* "first" and "last" tracking list, so initialize it. */ |
/* |
* "first" and "last" tracking list, so initialize it. RCU readers |
* have access to this list, so we must use INIT_LIST_HEAD_RCU() |
* instead of INIT_LIST_HEAD(). |
*/ |
INIT_LIST_HEAD(list); |
228,7 → 232,8 |
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
*/ |
#define list_entry_rcu(ptr, type, member) \ |
({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \ |
({ \ |
typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \ |
container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \ |
}) |
266,10 → 271,10 |
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
*/ |
#define list_first_or_null_rcu(ptr, type, member) \ |
({struct list_head *__ptr = (ptr); \ |
({ \ |
struct list_head *__ptr = (ptr); \ |
struct list_head *__next = ACCESS_ONCE(__ptr->next); \ |
likely(__ptr != __next) ? \ |
list_entry_rcu(__next, type, member) : NULL; \ |
likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \ |
}) |
/** |
412,9 → 417,9 |
} |
/** |
* hlist_add_after_rcu |
* hlist_add_behind_rcu |
* @n: the new element to add to the hash list. |
* @prev: the existing element to add the new element after. |
* @n: the new element to add to the hash list. |
* |
* Description: |
* Adds the specified element to the specified hlist |
429,8 → 434,8 |
* hlist_for_each_entry_rcu(), used to prevent memory-consistency |
* problems on Alpha CPUs. |
*/ |
static inline void hlist_add_after_rcu(struct hlist_node *prev, |
struct hlist_node *n) |
static inline void hlist_add_behind_rcu(struct hlist_node *n, |
struct hlist_node *prev) |
{ |
n->next = prev->next; |
n->pprev = &prev->next; |
/drivers/include/linux/reservation.h |
---|
0,0 → 1,62 |
/* |
* Header file for reservations for dma-buf and ttm |
* |
* Copyright(C) 2011 Linaro Limited. All rights reserved. |
* Copyright (C) 2012-2013 Canonical Ltd |
* Copyright (C) 2012 Texas Instruments |
* |
* Authors: |
* Rob Clark <robdclark@gmail.com> |
* Maarten Lankhorst <maarten.lankhorst@canonical.com> |
* Thomas Hellstrom <thellstrom-at-vmware-dot-com> |
* |
* Based on bo.c which bears the following copyright notice, |
* but is dual licensed: |
* |
* Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA |
* All Rights Reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the |
* "Software"), to deal in the Software without restriction, including |
* without limitation the rights to use, copy, modify, merge, publish, |
* distribute, sub license, and/or sell copies of the Software, and to |
* permit persons to whom the Software is furnished to do so, subject to |
* the following conditions: |
* |
* The above copyright notice and this permission notice (including the |
* next paragraph) shall be included in all copies or substantial portions |
* of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, |
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE |
* USE OR OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef _LINUX_RESERVATION_H |
#define _LINUX_RESERVATION_H |
#include <linux/ww_mutex.h> |
extern struct ww_class reservation_ww_class; |
struct reservation_object { |
struct ww_mutex lock; |
}; |
static inline void |
reservation_object_init(struct reservation_object *obj) |
{ |
ww_mutex_init(&obj->lock, &reservation_ww_class); |
} |
static inline void |
reservation_object_fini(struct reservation_object *obj) |
{ |
ww_mutex_destroy(&obj->lock); |
} |
#endif /* _LINUX_RESERVATION_H */ |
/drivers/include/linux/scatterlist.h |
---|
101,19 → 101,6 |
return (struct page *)((sg)->page_link & ~0x3); |
} |
/** |
* sg_set_buf - Set sg entry to point at given data |
* @sg: SG entry |
* @buf: Data |
* @buflen: Data length |
* |
**/ |
//static inline void sg_set_buf(struct scatterlist *sg, const void *buf, |
// unsigned int buflen) |
//{ |
// sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); |
//} |
/* |
* Loop over each sg element, following the pointer to a new list if necessary |
*/ |
226,10 → 213,10 |
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); |
typedef void (sg_free_fn)(struct scatterlist *, unsigned int); |
void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *); |
void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *); |
void sg_free_table(struct sg_table *); |
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t, |
sg_alloc_fn *); |
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, |
struct scatterlist *, gfp_t, sg_alloc_fn *); |
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); |
int sg_alloc_table_from_pages(struct sg_table *sgt, |
struct page **pages, unsigned int n_pages, |
241,6 → 228,11 |
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, |
void *buf, size_t buflen); |
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, |
void *buf, size_t buflen, off_t skip); |
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, |
void *buf, size_t buflen, off_t skip); |
/* |
* Maximum number of entries that will be allocated in one piece, if |
* a list larger than this is required then chaining will be utilized. |
/drivers/include/linux/sched.h |
---|
3,6 → 3,8 |
#define TASK_UNINTERRUPTIBLE 2 |
/* Task command name length */ |
#define TASK_COMM_LEN 16 |
#define schedule_timeout(x) delay(x) |
/drivers/include/linux/string.h |
---|
51,6 → 51,9 |
#ifndef __HAVE_ARCH_STRCHR |
extern char * strchr(const char *,int); |
#endif |
#ifndef __HAVE_ARCH_STRCHRNUL |
extern char * strchrnul(const char *,int); |
#endif |
#ifndef __HAVE_ARCH_STRNCHR |
extern char * strnchr(const char *, size_t, int); |
#endif |
/drivers/include/linux/types.h |
---|
116,11 → 116,12 |
typedef __u16 uint16_t; |
typedef __u32 uint32_t; |
#if defined(__GNUC__) |
typedef __u64 uint64_t; |
typedef __u64 u_int64_t; |
typedef __s64 int64_t; |
#endif |
typedef __signed__ long long int64_t; |
/* this is a special 64bit data type that is 8-byte aligned */ |
#define aligned_u64 __u64 __attribute__((aligned(8))) |
#define aligned_be64 __be64 __attribute__((aligned(8))) |
150,6 → 151,7 |
#define pgoff_t unsigned long |
#endif |
/* A dma_addr_t can hold any valid DMA or bus address for the platform */ |
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
typedef u64 dma_addr_t; |
#else |
200,6 → 202,7 |
#ifdef __KERNEL__ |
typedef unsigned __bitwise__ gfp_t; |
typedef unsigned __bitwise__ fmode_t; |
typedef unsigned __bitwise__ oom_flags_t; |
#ifdef CONFIG_PHYS_ADDR_T_64BIT |
typedef u64 phys_addr_t; |
209,6 → 212,12 |
typedef phys_addr_t resource_size_t; |
/* |
* This type is the placeholder for a hardware interrupt number. It has to be |
* big enough to enclose whatever representation is used by a given platform. |
*/ |
typedef unsigned long irq_hw_number_t; |
typedef struct { |
int counter; |
} atomic_t; |
/drivers/include/linux/uapi/drm/drm.h |
---|
39,7 → 39,7 |
#if defined(__KERNEL__) || defined(__linux__) |
#include <linux/types.h> |
#include <asm/ioctl.h> |
//#include <asm/ioctl.h> |
typedef unsigned int drm_handle_t; |
#else /* One of the BSDs */ |
619,6 → 619,17 |
#define DRM_PRIME_CAP_EXPORT 0x2 |
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6 |
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7 |
/* |
* The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight |
* combination for the hardware cursor. The intention is that a hardware |
* agnostic userspace can query a cursor plane size to use. |
* |
* Note that the cross-driver contract is to merely return a valid size; |
* drivers are free to attach another meaning on top, eg. i915 returns the |
* maximum plane size. |
*/ |
#define DRM_CAP_CURSOR_WIDTH 0x8 |
#define DRM_CAP_CURSOR_HEIGHT 0x9 |
/** DRM_IOCTL_GET_CAP ioctl argument type */ |
struct drm_get_cap { |
635,6 → 646,14 |
*/ |
#define DRM_CLIENT_CAP_STEREO_3D 1 |
/** |
* DRM_CLIENT_CAP_UNIVERSAL_PLANES |
* |
* If set to 1, the DRM core will expose all planes (overlay, primary, and |
* cursor) to userspace. |
*/ |
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2 |
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ |
struct drm_set_client_cap { |
__u64 capability; |
761,7 → 780,7 |
/** |
* Device specific ioctls should only be in their respective headers |
* The device specific ioctl range is from 0x40 to 0x99. |
* The device specific ioctl range is from 0x40 to 0x9f. |
* Generic IOCTLS restart at 0xA0. |
* |
* \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and |
/drivers/include/linux/uapi/drm/drm_fourcc.h |
---|
0,0 → 1,135 |
/* |
* Copyright 2011 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef DRM_FOURCC_H |
#define DRM_FOURCC_H |
#include <linux/types.h> |
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \ |
((__u32)(c) << 16) | ((__u32)(d) << 24)) |
#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */ |
/* color index */ |
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */ |
/* 8 bpp RGB */ |
#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */ |
#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */ |
/* 16 bpp RGB */ |
#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */ |
#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */ |
#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */ |
#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */ |
#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */ |
#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */ |
#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */ |
#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */ |
#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */ |
#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */ |
#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */ |
#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */ |
#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */ |
#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */ |
#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */ |
#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */ |
#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */ |
#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */ |
/* 24 bpp RGB */ |
#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */ |
#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */ |
/* 32 bpp RGB */ |
#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */ |
#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */ |
#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */ |
#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */ |
#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */ |
#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */ |
#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */ |
#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */ |
#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */ |
#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */ |
#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */ |
#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */ |
#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */ |
#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */ |
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */ |
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */ |
/* packed YCbCr */ |
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */ |
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */ |
#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */ |
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */ |
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */ |
/* |
* 2 plane YCbCr |
* index 0 = Y plane, [7:0] Y |
* index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian |
* or |
* index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian |
*/ |
#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */ |
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */ |
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */ |
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */ |
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */ |
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */ |
/* special NV12 tiled format */ |
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */ |
/* |
* 3 plane YCbCr |
* index 0: Y plane, [7:0] Y |
* index 1: Cb plane, [7:0] Cb |
* index 2: Cr plane, [7:0] Cr |
* or |
* index 1: Cr plane, [7:0] Cr |
* index 2: Cb plane, [7:0] Cb |
*/ |
#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */ |
#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */ |
#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */ |
#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */ |
#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */ |
#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */ |
#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */ |
#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */ |
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */ |
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */ |
#endif /* DRM_FOURCC_H */ |
/drivers/include/linux/uapi/drm/drm_mode.h |
---|
0,0 → 1,520 |
/* |
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie> |
* Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com> |
* Copyright (c) 2008 Red Hat Inc. |
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA |
* Copyright (c) 2007-2008 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
* IN THE SOFTWARE. |
*/ |
#ifndef _DRM_MODE_H |
#define _DRM_MODE_H |
#include <linux/types.h> |
#define DRM_DISPLAY_INFO_LEN 32 |
#define DRM_CONNECTOR_NAME_LEN 32 |
#define DRM_DISPLAY_MODE_LEN 32 |
#define DRM_PROP_NAME_LEN 32 |
#define DRM_MODE_TYPE_BUILTIN (1<<0) |
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) |
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) |
#define DRM_MODE_TYPE_PREFERRED (1<<3) |
#define DRM_MODE_TYPE_DEFAULT (1<<4) |
#define DRM_MODE_TYPE_USERDEF (1<<5) |
#define DRM_MODE_TYPE_DRIVER (1<<6) |
/* Video mode flags */ |
/* bit compatible with the xorg definitions. */ |
#define DRM_MODE_FLAG_PHSYNC (1<<0) |
#define DRM_MODE_FLAG_NHSYNC (1<<1) |
#define DRM_MODE_FLAG_PVSYNC (1<<2) |
#define DRM_MODE_FLAG_NVSYNC (1<<3) |
#define DRM_MODE_FLAG_INTERLACE (1<<4) |
#define DRM_MODE_FLAG_DBLSCAN (1<<5) |
#define DRM_MODE_FLAG_CSYNC (1<<6) |
#define DRM_MODE_FLAG_PCSYNC (1<<7) |
#define DRM_MODE_FLAG_NCSYNC (1<<8) |
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ |
#define DRM_MODE_FLAG_BCAST (1<<10) |
#define DRM_MODE_FLAG_PIXMUX (1<<11) |
#define DRM_MODE_FLAG_DBLCLK (1<<12) |
#define DRM_MODE_FLAG_CLKDIV2 (1<<13) |
/* |
* When adding a new stereo mode don't forget to adjust DRM_MODE_FLAGS_3D_MAX |
* (define not exposed to user space). |
*/ |
#define DRM_MODE_FLAG_3D_MASK (0x1f<<14) |
#define DRM_MODE_FLAG_3D_NONE (0<<14) |
#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14) |
#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14) |
#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14) |
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14) |
#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14) |
#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14) |
#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14) |
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14) |
/* DPMS flags */ |
/* bit compatible with the xorg definitions. */ |
#define DRM_MODE_DPMS_ON 0 |
#define DRM_MODE_DPMS_STANDBY 1 |
#define DRM_MODE_DPMS_SUSPEND 2 |
#define DRM_MODE_DPMS_OFF 3 |
/* Scaling mode options */ |
#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or |
software can still scale) */ |
#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */ |
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */ |
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */ |
/* Picture aspect ratio options */ |
#define DRM_MODE_PICTURE_ASPECT_NONE 0 |
#define DRM_MODE_PICTURE_ASPECT_4_3 1 |
#define DRM_MODE_PICTURE_ASPECT_16_9 2 |
/* Dithering mode options */ |
#define DRM_MODE_DITHERING_OFF 0 |
#define DRM_MODE_DITHERING_ON 1 |
#define DRM_MODE_DITHERING_AUTO 2 |
/* Dirty info options */ |
#define DRM_MODE_DIRTY_OFF 0 |
#define DRM_MODE_DIRTY_ON 1 |
#define DRM_MODE_DIRTY_ANNOTATE 2 |
/* Description of a single display mode: dot clock, horizontal and
 * vertical timings, plus DRM_MODE_FLAG_* and DRM_MODE_TYPE_* bits.
 */
struct drm_mode_modeinfo {
	__u32 clock;
	__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;	/* horizontal timings */
	__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;	/* vertical timings */
	__u32 vrefresh;
	__u32 flags;	/* DRM_MODE_FLAG_* */
	__u32 type;	/* DRM_MODE_TYPE_* */
	char name[DRM_DISPLAY_MODE_LEN];
};
/* Card resource enumeration. The *_ptr fields are userspace array
 * pointers packed into __u64 for 32/64-bit compatibility; the count_*
 * fields give the number of entries in each array.
 */
struct drm_mode_card_res {
	__u64 fb_id_ptr;
	__u64 crtc_id_ptr;
	__u64 connector_id_ptr;
	__u64 encoder_id_ptr;
	__u32 count_fbs;
	__u32 count_crtcs;
	__u32 count_connectors;
	__u32 count_encoders;
	__u32 min_width, max_width;	/* framebuffer size limits */
	__u32 min_height, max_height;
};
/* CRTC configuration: connector list, scanout framebuffer, scanout
 * position within that framebuffer, and (when mode_valid is set) the
 * display mode to program.
 */
struct drm_mode_crtc {
	__u64 set_connectors_ptr;	/* userspace array of connector IDs */
	__u32 count_connectors;
	__u32 crtc_id; /**< Id */
	__u32 fb_id; /**< Id of framebuffer */
	__u32 x, y; /**< Position on the framebuffer */
	__u32 gamma_size;
	__u32 mode_valid;	/* nonzero if 'mode' below is meaningful */
	struct drm_mode_modeinfo mode;
};
#define DRM_MODE_PRESENT_TOP_FIELD (1<<0) |
#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1) |
/* Planes blend with or override other bits on the CRTC */ |
/* Attach a framebuffer to a plane on a CRTC, with a source rectangle
 * (16.16 fixed point) scaled/placed into a destination rectangle.
 */
struct drm_mode_set_plane {
	__u32 plane_id;
	__u32 crtc_id;
	__u32 fb_id; /* fb object contains surface format type */
	__u32 flags; /* see above flags */
	/* Signed dest location allows it to be partially off screen */
	__s32 crtc_x, crtc_y;
	__u32 crtc_w, crtc_h;
	/* Source values are 16.16 fixed point */
	__u32 src_x, src_y;
	__u32 src_h, src_w;
};
/* Query a single plane: current CRTC/fb binding, which CRTCs it can be
 * used with, and the pixel formats it supports (fourcc codes written
 * into the userspace array at format_type_ptr).
 */
struct drm_mode_get_plane {
	__u32 plane_id;
	__u32 crtc_id;
	__u32 fb_id;
	__u32 possible_crtcs;	/* bitmask of compatible CRTCs */
	__u32 gamma_size;
	__u32 count_format_types;
	__u64 format_type_ptr;	/* userspace array pointer */
};
/* Enumerate all plane IDs into the userspace array at plane_id_ptr. */
struct drm_mode_get_plane_res {
	__u64 plane_id_ptr;
	__u32 count_planes;
};
#define DRM_MODE_ENCODER_NONE 0 |
#define DRM_MODE_ENCODER_DAC 1 |
#define DRM_MODE_ENCODER_TMDS 2 |
#define DRM_MODE_ENCODER_LVDS 3 |
#define DRM_MODE_ENCODER_TVDAC 4 |
#define DRM_MODE_ENCODER_VIRTUAL 5 |
#define DRM_MODE_ENCODER_DSI 6 |
#define DRM_MODE_ENCODER_DPMST 7 |
/* Query an encoder: its DRM_MODE_ENCODER_* type, the CRTC currently
 * driving it, and bitmasks of compatible CRTCs / cloneable encoders.
 */
struct drm_mode_get_encoder {
	__u32 encoder_id;
	__u32 encoder_type;	/* DRM_MODE_ENCODER_* */
	__u32 crtc_id; /**< Id of crtc */
	__u32 possible_crtcs;	/* bitmask */
	__u32 possible_clones;	/* bitmask */
};
/* This is for connectors with multiple signal types. */ |
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */ |
#define DRM_MODE_SUBCONNECTOR_Automatic 0 |
#define DRM_MODE_SUBCONNECTOR_Unknown 0 |
#define DRM_MODE_SUBCONNECTOR_DVID 3 |
#define DRM_MODE_SUBCONNECTOR_DVIA 4 |
#define DRM_MODE_SUBCONNECTOR_Composite 5 |
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6 |
#define DRM_MODE_SUBCONNECTOR_Component 8 |
#define DRM_MODE_SUBCONNECTOR_SCART 9 |
#define DRM_MODE_CONNECTOR_Unknown 0 |
#define DRM_MODE_CONNECTOR_VGA 1 |
#define DRM_MODE_CONNECTOR_DVII 2 |
#define DRM_MODE_CONNECTOR_DVID 3 |
#define DRM_MODE_CONNECTOR_DVIA 4 |
#define DRM_MODE_CONNECTOR_Composite 5 |
#define DRM_MODE_CONNECTOR_SVIDEO 6 |
#define DRM_MODE_CONNECTOR_LVDS 7 |
#define DRM_MODE_CONNECTOR_Component 8 |
#define DRM_MODE_CONNECTOR_9PinDIN 9 |
#define DRM_MODE_CONNECTOR_DisplayPort 10 |
#define DRM_MODE_CONNECTOR_HDMIA 11 |
#define DRM_MODE_CONNECTOR_HDMIB 12 |
#define DRM_MODE_CONNECTOR_TV 13 |
#define DRM_MODE_CONNECTOR_eDP 14 |
#define DRM_MODE_CONNECTOR_VIRTUAL 15 |
#define DRM_MODE_CONNECTOR_DSI 16 |
/* Query a connector: its encoders, probed modes, and properties.
 * The *_ptr fields are userspace array pointers packed into __u64;
 * the corresponding count_* fields size those arrays.
 */
struct drm_mode_get_connector {
	__u64 encoders_ptr;
	__u64 modes_ptr;
	__u64 props_ptr;
	__u64 prop_values_ptr;
	__u32 count_modes;
	__u32 count_props;
	__u32 count_encoders;
	__u32 encoder_id; /**< Current Encoder */
	__u32 connector_id; /**< Id */
	__u32 connector_type;	/* DRM_MODE_CONNECTOR_* */
	__u32 connector_type_id;
	__u32 connection;	/* connection status */
	__u32 mm_width, mm_height; /**< width x height in millimeters */
	__u32 subpixel;
	__u32 pad;	/* keeps the struct the same size on 32/64 bit */
};
#define DRM_MODE_PROP_PENDING (1<<0) |
#define DRM_MODE_PROP_RANGE (1<<1) |
#define DRM_MODE_PROP_IMMUTABLE (1<<2) |
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */ |
#define DRM_MODE_PROP_BLOB (1<<4) |
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */ |
/* non-extended types: legacy bitmask, one bit per type: */ |
#define DRM_MODE_PROP_LEGACY_TYPE ( \ |
DRM_MODE_PROP_RANGE | \ |
DRM_MODE_PROP_ENUM | \ |
DRM_MODE_PROP_BLOB | \ |
DRM_MODE_PROP_BITMASK) |
/* extended-types: rather than continue to consume a bit per type, |
* grab a chunk of the bits to use as integer type id. |
*/ |
#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0 |
#define DRM_MODE_PROP_TYPE(n) ((n) << 6) |
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1) |
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2) |
/* One named value of an enum/bitmask property. */
struct drm_mode_property_enum {
	__u64 value;
	char name[DRM_PROP_NAME_LEN];
};
/* Query a property's metadata; which of the userspace arrays is used
 * depends on the property's DRM_MODE_PROP_* flags.
 */
struct drm_mode_get_property {
	__u64 values_ptr; /* values and blob lengths */
	__u64 enum_blob_ptr; /* enum and blob id ptrs */
	__u32 prop_id;
	__u32 flags;	/* DRM_MODE_PROP_* */
	char name[DRM_PROP_NAME_LEN];
	__u32 count_values;
	__u32 count_enum_blobs;
};
/* Set one property value on a connector. */
struct drm_mode_connector_set_property {
	__u64 value;
	__u32 prop_id;
	__u32 connector_id;
};
/* Enumerate the properties (IDs and current values, as parallel
 * userspace arrays) attached to an arbitrary mode object.
 */
struct drm_mode_obj_get_properties {
	__u64 props_ptr;
	__u64 prop_values_ptr;
	__u32 count_props;
	__u32 obj_id;
	__u32 obj_type;
};
/* Set one property value on an arbitrary mode object. */
struct drm_mode_obj_set_property {
	__u64 value;
	__u32 prop_id;
	__u32 obj_id;
	__u32 obj_type;
};
/* Read back a blob property's payload into the userspace buffer at
 * 'data' (a user pointer packed into __u64).
 */
struct drm_mode_get_blob {
	__u32 blob_id;
	__u32 length;	/* size of the buffer at 'data', in bytes */
	__u64 data;
};
/* Legacy (single-plane) framebuffer creation: one buffer object handle
 * plus pitch/bpp/depth. Superseded by drm_mode_fb_cmd2 below, which
 * describes the format with a fourcc and supports multiple planes.
 */
struct drm_mode_fb_cmd {
	__u32 fb_id;
	__u32 width, height;
	__u32 pitch;
	__u32 bpp;
	__u32 depth;
	/* driver specific handle */
	__u32 handle;
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */ |
/* Framebuffer creation with an explicit fourcc pixel format and up to
 * four per-plane buffer objects (for planar YUV and similar formats).
 */
struct drm_mode_fb_cmd2 {
	__u32 fb_id;
	__u32 width, height;
	__u32 pixel_format; /* fourcc code from drm_fourcc.h */
	__u32 flags; /* see above flags */

	/*
	 * In case of planar formats, this ioctl allows up to 4
	 * buffer objects with offsets and pitches per plane.
	 * The pitch and offset order is dictated by the fourcc,
	 * e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
	 *
	 *   YUV 4:2:0 image with a plane of 8 bit Y samples
	 *   followed by an interleaved U/V plane containing
	 *   8 bit 2x2 subsampled colour difference samples.
	 *
	 * So it would consist of Y as offset[0] and UV as
	 * offset[1].  Note that offset[0] will generally
	 * be 0.
	 */
	__u32 handles[4];
	__u32 pitches[4]; /* pitch for each plane */
	__u32 offsets[4]; /* offset of each plane */
};
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01 |
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 |
#define DRM_MODE_FB_DIRTY_FLAGS 0x03 |
#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256 |
/* |
* Mark a region of a framebuffer as dirty. |
* |
* Some hardware does not automatically update display contents |
* as a hardware or software draw to a framebuffer. This ioctl |
* allows userspace to tell the kernel and the hardware what |
* regions of the framebuffer have changed. |
* |
 * The kernel or hardware is free to update more than just the
* region specified by the clip rects. The kernel or hardware |
* may also delay and/or coalesce several calls to dirty into a |
* single update. |
* |
 * Userspace may annotate the updates; the annotations are a
* promise made by the caller that the change is either a copy |
* of pixels or a fill of a single color in the region specified. |
* |
* If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then |
* the number of updated regions are half of num_clips given, |
* where the clip rects are paired in src and dst. The width and |
* height of each one of the pairs must match. |
* |
* If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller |
* promises that the region specified of the clip rects is filled |
* completely with a single color as given in the color argument. |
*/ |
/* Dirty-region notification for a framebuffer; see the block comment
 * above for the flags/num_clips/color semantics. clips_ptr is a
 * userspace array of clip rects packed into __u64.
 */
struct drm_mode_fb_dirty_cmd {
	__u32 fb_id;
	__u32 flags;	/* DRM_MODE_FB_DIRTY_ANNOTATE_* */
	__u32 color;	/* fill color when ANNOTATE_FILL is set */
	__u32 num_clips;
	__u64 clips_ptr;
};
/* Attach/detach a display mode to/from a connector. */
struct drm_mode_mode_cmd {
	__u32 connector_id;
	struct drm_mode_modeinfo mode;
};
#define DRM_MODE_CURSOR_BO 0x01 |
#define DRM_MODE_CURSOR_MOVE 0x02 |
#define DRM_MODE_CURSOR_FLAGS 0x03 |
/* |
* depending on the value in flags different members are used. |
* |
* CURSOR_BO uses |
* crtc_id |
* width |
* height |
* handle - if 0 turns the cursor off |
* |
* CURSOR_MOVE uses |
* crtc_id |
* x |
* y |
*/ |
/* Cursor control; see the block comment above for which members are
 * used with DRM_MODE_CURSOR_BO vs DRM_MODE_CURSOR_MOVE.
 */
struct drm_mode_cursor {
	__u32 flags;	/* DRM_MODE_CURSOR_* */
	__u32 crtc_id;
	__s32 x;
	__s32 y;
	__u32 width;
	__u32 height;
	/* driver specific handle */
	__u32 handle;	/* 0 turns the cursor off (CURSOR_BO) */
};
/* Same layout as drm_mode_cursor, extended with a cursor hotspot
 * (hot_x/hot_y) at the end.
 */
struct drm_mode_cursor2 {
	__u32 flags;	/* DRM_MODE_CURSOR_* */
	__u32 crtc_id;
	__s32 x;
	__s32 y;
	__u32 width;
	__u32 height;
	/* driver specific handle */
	__u32 handle;
	__s32 hot_x;	/* hotspot offset within the cursor image */
	__s32 hot_y;
};
/* Gamma LUT for a CRTC: three userspace arrays of gamma_size entries
 * each, one per color channel.
 */
struct drm_mode_crtc_lut {
	__u32 crtc_id;
	__u32 gamma_size;
	/* pointers to arrays */
	__u64 red;
	__u64 green;
	__u64 blue;
};
#define DRM_MODE_PAGE_FLIP_EVENT 0x01 |
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02 |
#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC) |
/* |
* Request a page flip on the specified crtc. |
* |
* This ioctl will ask KMS to schedule a page flip for the specified |
* crtc. Once any pending rendering targeting the specified fb (as of |
* ioctl time) has completed, the crtc will be reprogrammed to display |
* that fb after the next vertical refresh. The ioctl returns |
* immediately, but subsequent rendering to the current fb will block |
* in the execbuffer ioctl until the page flip happens. If a page |
* flip is already pending as the ioctl is called, EBUSY will be |
* returned. |
* |
* Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank |
* event (see drm.h: struct drm_event_vblank) when the page flip is |
* done. The user_data field passed in with this ioctl will be |
* returned as the user_data field in the vblank event struct. |
* |
* Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen |
 * 'as soon as possible', meaning that it will not delay waiting for vblank.
* This may cause tearing on the screen. |
* |
* The reserved field must be zero until we figure out something |
* clever to use it for. |
*/ |
/* Page flip request; see the block comment above for semantics.
 * user_data is echoed back in the vblank event when
 * DRM_MODE_PAGE_FLIP_EVENT is set.
 */
struct drm_mode_crtc_page_flip {
	__u32 crtc_id;
	__u32 fb_id;
	__u32 flags;	/* DRM_MODE_PAGE_FLIP_* */
	__u32 reserved;	/* must be zero */
	__u64 user_data;
};
/* create a dumb scanout buffer */ |
/* Create a dumb scanout buffer.
 *
 * height/width/bpp/flags are supplied by userspace; handle, pitch and
 * size are returned by the kernel.
 *
 * Uses the kernel fixed-width types (__u32/__u64) like every other
 * struct in this header, instead of uint32_t/uint64_t which would pull
 * in <stdint.h>; the memory layout is identical.
 */
struct drm_mode_create_dumb {
	__u32 height;
	__u32 width;
	__u32 bpp;
	__u32 flags;
	/* handle, pitch, size will be returned */
	__u32 handle;
	__u32 pitch;
	__u64 size;
};
/* set up for mmap of a dumb scanout buffer */ |
/* Request an mmap offset for a dumb buffer previously created with
 * drm_mode_create_dumb.
 */
struct drm_mode_map_dumb {
	/** Handle for the object being mapped. */
	__u32 handle;
	__u32 pad;	/* align 'offset' to 8 bytes */
	/**
	 * Fake offset to use for subsequent mmap call
	 *
	 * This is a fixed-size type for 32/64 compatibility.
	 */
	__u64 offset;
};
/* Destroy a dumb buffer by handle.
 *
 * Uses __u32 for consistency with the rest of this UAPI header
 * (instead of uint32_t); the memory layout is identical.
 */
struct drm_mode_destroy_dumb {
	__u32 handle;
};
#endif |
/drivers/include/linux/uapi/drm/i915_drm.h |
---|
223,6 → 223,7 |
#define DRM_I915_GEM_GET_CACHING 0x30 |
#define DRM_I915_REG_READ 0x31 |
#define DRM_I915_GET_RESET_STATS 0x32 |
#define DRM_I915_GEM_USERPTR 0x33 |
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) |
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) |
273,6 → 274,7 |
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) |
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) |
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) |
#define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr) |
/* Allow drivers to submit batchbuffers directly to hardware, relying |
* on the security mechanisms provided by hardware. |
337,6 → 339,7 |
#define I915_PARAM_HAS_EXEC_NO_RELOC 25 |
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 |
#define I915_PARAM_HAS_WT 27 |
#define I915_PARAM_CMD_PARSER_VERSION 28 |
typedef struct drm_i915_getparam { |
int param; |
1049,6 → 1052,20 |
__u32 pad; |
}; |
struct drm_i915_gem_userptr { |
__u64 user_ptr; |
__u64 user_size; |
__u32 flags; |
#define I915_USERPTR_READ_ONLY 0x1 |
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000 |
/** |
* Returned handle for the object. |
* |
* Object handles are nonzero. |
*/ |
__u32 handle; |
}; |
struct drm_i915_mask { |
__u32 handle; |
__u32 width; |
/drivers/include/linux/uapi/drm/radeon_drm.h |
---|
0,0 → 1,1040 |
/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*- |
* |
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Fremont, California. |
* Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. |
* All rights reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS IN THE SOFTWARE. |
* |
* Authors: |
* Kevin E. Martin <martin@valinux.com> |
* Gareth Hughes <gareth@valinux.com> |
* Keith Whitwell <keith@tungstengraphics.com> |
*/ |
#ifndef __RADEON_DRM_H__ |
#define __RADEON_DRM_H__ |
#include <drm/drm.h> |
/* WARNING: If you change any of these defines, make sure to change the |
* defines in the X server file (radeon_sarea.h) |
*/ |
#ifndef __RADEON_SAREA_DEFINES__ |
#define __RADEON_SAREA_DEFINES__ |
/* Old style state flags, required for sarea interface (1.1 and 1.2 |
* clears) and 1.2 drm_vertex2 ioctl. |
*/ |
#define RADEON_UPLOAD_CONTEXT 0x00000001 |
#define RADEON_UPLOAD_VERTFMT 0x00000002 |
#define RADEON_UPLOAD_LINE 0x00000004 |
#define RADEON_UPLOAD_BUMPMAP 0x00000008 |
#define RADEON_UPLOAD_MASKS 0x00000010 |
#define RADEON_UPLOAD_VIEWPORT 0x00000020 |
#define RADEON_UPLOAD_SETUP 0x00000040 |
#define RADEON_UPLOAD_TCL 0x00000080 |
#define RADEON_UPLOAD_MISC 0x00000100 |
#define RADEON_UPLOAD_TEX0 0x00000200 |
#define RADEON_UPLOAD_TEX1 0x00000400 |
#define RADEON_UPLOAD_TEX2 0x00000800 |
#define RADEON_UPLOAD_TEX0IMAGES 0x00001000 |
#define RADEON_UPLOAD_TEX1IMAGES 0x00002000 |
#define RADEON_UPLOAD_TEX2IMAGES 0x00004000 |
#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */ |
#define RADEON_REQUIRE_QUIESCENCE 0x00010000 |
#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */ |
#define RADEON_UPLOAD_ALL 0x003effff |
#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff |
/* New style per-packet identifiers for use in cmd_buffer ioctl with |
* the RADEON_EMIT_PACKET command. Comments relate new packets to old |
* state bits and the packet size: |
*/ |
#define RADEON_EMIT_PP_MISC 0 /* context/7 */ |
#define RADEON_EMIT_PP_CNTL 1 /* context/3 */ |
#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */ |
#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */ |
#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */ |
#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */ |
#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */ |
#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */ |
#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */ |
#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */ |
#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */ |
#define RADEON_EMIT_RE_MISC 11 /* misc/1 */ |
#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */ |
#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */ |
#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */ |
#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */ |
#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */ |
#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */ |
#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */ |
#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */ |
#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */ |
#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */ |
#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */ |
#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */ |
#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */ |
#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */ |
#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */ |
#define R200_EMIT_TFACTOR_0 30 /* tf/7 */ |
#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */ |
#define R200_EMIT_VAP_CTL 32 /* vap/1 */ |
#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */ |
#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */ |
#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */ |
#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */ |
#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */ |
#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */ |
#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */ |
#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */ |
#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */ |
#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */ |
#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */ |
#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */ |
#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */ |
#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */ |
#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */ |
#define R200_EMIT_VTE_CNTL 48 /* vte/1 */ |
#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */ |
#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */ |
#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */ |
#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */ |
#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */ |
#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */ |
#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */ |
#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */ |
#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */ |
#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */ |
#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */ |
#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */ |
#define R200_EMIT_PP_CUBIC_FACES_0 61 |
#define R200_EMIT_PP_CUBIC_OFFSETS_0 62 |
#define R200_EMIT_PP_CUBIC_FACES_1 63 |
#define R200_EMIT_PP_CUBIC_OFFSETS_1 64 |
#define R200_EMIT_PP_CUBIC_FACES_2 65 |
#define R200_EMIT_PP_CUBIC_OFFSETS_2 66 |
#define R200_EMIT_PP_CUBIC_FACES_3 67 |
#define R200_EMIT_PP_CUBIC_OFFSETS_3 68 |
#define R200_EMIT_PP_CUBIC_FACES_4 69 |
#define R200_EMIT_PP_CUBIC_OFFSETS_4 70 |
#define R200_EMIT_PP_CUBIC_FACES_5 71 |
#define R200_EMIT_PP_CUBIC_OFFSETS_5 72 |
#define RADEON_EMIT_PP_TEX_SIZE_0 73 |
#define RADEON_EMIT_PP_TEX_SIZE_1 74 |
#define RADEON_EMIT_PP_TEX_SIZE_2 75 |
#define R200_EMIT_RB3D_BLENDCOLOR 76 |
#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77 |
#define RADEON_EMIT_PP_CUBIC_FACES_0 78 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79 |
#define RADEON_EMIT_PP_CUBIC_FACES_1 80 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81 |
#define RADEON_EMIT_PP_CUBIC_FACES_2 82 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83 |
#define R200_EMIT_PP_TRI_PERF_CNTL 84 |
#define R200_EMIT_PP_AFS_0 85 |
#define R200_EMIT_PP_AFS_1 86 |
#define R200_EMIT_ATF_TFACTOR 87 |
#define R200_EMIT_PP_TXCTLALL_0 88 |
#define R200_EMIT_PP_TXCTLALL_1 89 |
#define R200_EMIT_PP_TXCTLALL_2 90 |
#define R200_EMIT_PP_TXCTLALL_3 91 |
#define R200_EMIT_PP_TXCTLALL_4 92 |
#define R200_EMIT_PP_TXCTLALL_5 93 |
#define R200_EMIT_VAP_PVS_CNTL 94 |
#define RADEON_MAX_STATE_PACKETS 95 |
/* Commands understood by cmd_buffer ioctl. More can be added but |
* obviously these can't be removed or changed: |
*/ |
#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */ |
#define RADEON_CMD_SCALARS 2 /* emit scalar data */ |
#define RADEON_CMD_VECTORS 3 /* emit vector data */ |
#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */ |
#define RADEON_CMD_PACKET3 5 /* emit hw packet */ |
#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */ |
#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */ |
#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note: |
* doesn't make the cpu wait, just |
* the graphics hardware */ |
#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */ |
/* First dword of each command in the cmd_buffer ioctl stream.
 * cmd_type (one of the RADEON_CMD_* values above) selects which of
 * the overlaid views applies; the remaining bytes are per-command
 * parameters.
 */
typedef union {
	int i;
	struct {
		unsigned char cmd_type, pad0, pad1, pad2;
	} header;
	struct {	/* RADEON_CMD_PACKET: packet_id indexes the EMIT tables above */
		unsigned char cmd_type, packet_id, pad0, pad1;
	} packet;
	struct {	/* RADEON_CMD_SCALARS */
		unsigned char cmd_type, offset, stride, count;
	} scalars;
	struct {	/* RADEON_CMD_VECTORS */
		unsigned char cmd_type, offset, stride, count;
	} vectors;
	struct {	/* RADEON_CMD_VECLINEAR */
		unsigned char cmd_type, addr_lo, addr_hi, count;
	} veclinear;
	struct {	/* RADEON_CMD_DMA_DISCARD */
		unsigned char cmd_type, buf_idx, pad0, pad1;
	} dma;
	struct {	/* RADEON_CMD_WAIT: flags are RADEON_WAIT_* */
		unsigned char cmd_type, flags, pad0, pad1;
	} wait;
} drm_radeon_cmd_header_t;
#define RADEON_WAIT_2D 0x1 |
#define RADEON_WAIT_3D 0x2 |
/* Allowed parameters for R300_CMD_PACKET3 |
*/ |
#define R300_CMD_PACKET3_CLEAR 0 |
#define R300_CMD_PACKET3_RAW 1 |
/* Commands understood by cmd_buffer ioctl for R300. |
* The interface has not been stabilized, so some of these may be removed |
* and eventually reordered before stabilization. |
*/ |
#define R300_CMD_PACKET0 1 |
#define R300_CMD_VPU 2 /* emit vertex program upload */ |
#define R300_CMD_PACKET3 3 /* emit a packet3 */ |
#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */ |
#define R300_CMD_CP_DELAY 5 |
#define R300_CMD_DMA_DISCARD 6 |
#define R300_CMD_WAIT 7 |
# define R300_WAIT_2D 0x1 |
# define R300_WAIT_3D 0x2 |
/* these two defines are DOING IT WRONG - however |
* we have userspace which relies on using these. |
* The wait interface is backwards compat new |
* code should use the NEW_WAIT defines below |
* THESE ARE NOT BIT FIELDS |
*/ |
# define R300_WAIT_2D_CLEAN 0x3 |
# define R300_WAIT_3D_CLEAN 0x4 |
# define R300_NEW_WAIT_2D_3D 0x3 |
# define R300_NEW_WAIT_2D_2D_CLEAN 0x4 |
# define R300_NEW_WAIT_3D_3D_CLEAN 0x6 |
# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8 |
#define R300_CMD_SCRATCH 8 |
#define R300_CMD_R500FP 9 |
/* First dword of each command in the R300 cmd_buffer stream.
 * cmd_type (one of the R300_CMD_* values above) selects the view.
 */
typedef union {
	unsigned int u;
	struct {
		unsigned char cmd_type, pad0, pad1, pad2;
	} header;
	struct {	/* R300_CMD_PACKET0 */
		unsigned char cmd_type, count, reglo, reghi;
	} packet0;
	struct {	/* R300_CMD_VPU: vertex program upload */
		unsigned char cmd_type, count, adrlo, adrhi;
	} vpu;
	struct {	/* R300_CMD_PACKET3 */
		unsigned char cmd_type, packet, pad0, pad1;
	} packet3;
	struct {	/* R300_CMD_CP_DELAY */
		unsigned char cmd_type, packet;
		unsigned short count;	/* amount of packet2 to emit */
	} delay;
	struct {	/* R300_CMD_DMA_DISCARD */
		unsigned char cmd_type, buf_idx, pad0, pad1;
	} dma;
	struct {	/* R300_CMD_WAIT: flags are R300_WAIT_* / R300_NEW_WAIT_* */
		unsigned char cmd_type, flags, pad0, pad1;
	} wait;
	struct {	/* R300_CMD_SCRATCH */
		unsigned char cmd_type, reg, n_bufs, flags;
	} scratch;
	struct {	/* R300_CMD_R500FP */
		unsigned char cmd_type, count, adrlo, adrhi_flags;
	} r500fp;
} drm_r300_cmd_header_t;
#define RADEON_FRONT 0x1 |
#define RADEON_BACK 0x2 |
#define RADEON_DEPTH 0x4 |
#define RADEON_STENCIL 0x8 |
#define RADEON_CLEAR_FASTZ 0x80000000 |
#define RADEON_USE_HIERZ 0x40000000 |
#define RADEON_USE_COMP_ZBUF 0x20000000 |
#define R500FP_CONSTANT_TYPE (1 << 1) |
#define R500FP_CONSTANT_CLAMP (1 << 2) |
/* Primitive types |
*/ |
#define RADEON_POINTS 0x1 |
#define RADEON_LINES 0x2 |
#define RADEON_LINE_STRIP 0x3 |
#define RADEON_TRIANGLES 0x4 |
#define RADEON_TRIANGLE_FAN 0x5 |
#define RADEON_TRIANGLE_STRIP 0x6 |
/* Vertex/indirect buffer size |
*/ |
#define RADEON_BUFFER_SIZE 65536 |
/* Byte offsets for indirect buffer data |
*/ |
#define RADEON_INDEX_PRIM_OFFSET 20 |
#define RADEON_SCRATCH_REG_OFFSET 32 |
#define R600_SCRATCH_REG_OFFSET 256 |
#define RADEON_NR_SAREA_CLIPRECTS 12 |
/* There are 2 heaps (local/GART). Each region within a heap is a |
* minimum of 64k, and there are at most 64 of them per heap. |
*/ |
#define RADEON_LOCAL_TEX_HEAP 0 |
#define RADEON_GART_TEX_HEAP 1 |
#define RADEON_NR_TEX_HEAPS 2 |
#define RADEON_NR_TEX_REGIONS 64 |
#define RADEON_LOG_TEX_GRANULARITY 16 |
#define RADEON_MAX_TEXTURE_LEVELS 12 |
#define RADEON_MAX_TEXTURE_UNITS 3 |
#define RADEON_MAX_SURFACES 8 |
/* Blits have strict offset rules. All blit offset must be aligned on |
* a 1K-byte boundary. |
*/ |
#define RADEON_OFFSET_SHIFT 10 |
#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT) |
#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1) |
#endif /* __RADEON_SAREA_DEFINES__ */ |
/* One RGBA color as four separate register values. */
typedef struct {
	unsigned int red;
	unsigned int green;
	unsigned int blue;
	unsigned int alpha;
} radeon_color_regs_t;
/* Snapshot of the base 3D context registers, grouped to match the
 * RADEON_UPLOAD_* dirty bits (context, vertex format, line, bumpmap,
 * masks, viewport, setup, misc). Hex comments give the first register
 * offset of each group.
 */
typedef struct {
	/* Context state */
	unsigned int pp_misc;	/* 0x1c14 */
	unsigned int pp_fog_color;
	unsigned int re_solid_color;
	unsigned int rb3d_blendcntl;
	unsigned int rb3d_depthoffset;
	unsigned int rb3d_depthpitch;
	unsigned int rb3d_zstencilcntl;

	unsigned int pp_cntl;	/* 0x1c38 */
	unsigned int rb3d_cntl;
	unsigned int rb3d_coloroffset;
	unsigned int re_width_height;
	unsigned int rb3d_colorpitch;
	unsigned int se_cntl;

	/* Vertex format state */
	unsigned int se_coord_fmt;	/* 0x1c50 */

	/* Line state */
	unsigned int re_line_pattern;	/* 0x1cd0 */
	unsigned int re_line_state;

	unsigned int se_line_width;	/* 0x1db8 */

	/* Bumpmap state */
	unsigned int pp_lum_matrix;	/* 0x1d00 */

	unsigned int pp_rot_matrix_0;	/* 0x1d58 */
	unsigned int pp_rot_matrix_1;

	/* Mask state */
	unsigned int rb3d_stencilrefmask;	/* 0x1d7c */
	unsigned int rb3d_ropcntl;
	unsigned int rb3d_planemask;

	/* Viewport state */
	unsigned int se_vport_xscale;	/* 0x1d98 */
	unsigned int se_vport_xoffset;
	unsigned int se_vport_yscale;
	unsigned int se_vport_yoffset;
	unsigned int se_vport_zscale;
	unsigned int se_vport_zoffset;

	/* Setup state */
	unsigned int se_cntl_status;	/* 0x2140 */

	/* Misc state */
	unsigned int re_top_left;	/* 0x26c0 */
	unsigned int re_misc;
} drm_radeon_context_regs_t;
/* Additional context registers introduced with interface version 1.2
 * (RADEON_UPLOAD_ZBIAS).
 */
typedef struct {
	/* Zbias state */
	unsigned int se_zbias_factor;	/* 0x1dac */
	unsigned int se_zbias_constant;
} drm_radeon_context2_regs_t;
/* Setup registers for each texture unit |
*/ |
/* Setup registers for each texture unit
 */
typedef struct {
	unsigned int pp_txfilter;
	unsigned int pp_txformat;
	unsigned int pp_txoffset;
	unsigned int pp_txcblend;
	unsigned int pp_txablend;
	unsigned int pp_tfactor;

	unsigned int pp_border_color;
} drm_radeon_texture_regs_t;
/* One primitive within a vertex buffer: [start, finish) byte range,
 * primitive type (RADEON_POINTS etc.), state index, vertex count and
 * vertex format.
 */
typedef struct {
	unsigned int start;
	unsigned int finish;
	unsigned int prim:8;
	unsigned int stateidx:8;
	unsigned int numverts:16;	/* overloaded as offset/64 for elt prims */
	unsigned int vc_format;	/* vertex format */
} drm_radeon_prim_t;
/* Full state snapshot referenced by a vertex2 ioctl; 'dirty' is a mask
 * of RADEON_UPLOAD_* bits indicating which groups need re-emitting.
 */
typedef struct {
	drm_radeon_context_regs_t context;
	drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
	drm_radeon_context2_regs_t context2;
	unsigned int dirty;
} drm_radeon_state_t;
/* Shared memory area (SAREA) layout between the DRM and userspace
 * clients: state for the obsoleted vertex/index ioctls, the current
 * cliprects, throttling counters and texture-heap aging.
 */
typedef struct {
	/* The channel for communication of state information to the
	 * kernel on firing a vertex buffer with either of the
	 * obsoleted vertex/index ioctls.
	 */
	drm_radeon_context_regs_t context_state;
	drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
	unsigned int dirty;	/* RADEON_UPLOAD_* mask */
	unsigned int vertsize;
	unsigned int vc_format;

	/* The current cliprects, or a subset thereof.
	 */
	struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
	unsigned int nbox;

	/* Counters for client-side throttling of rendering clients.
	 */
	unsigned int last_frame;
	unsigned int last_dispatch;
	unsigned int last_clear;

	struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
							    1];
	unsigned int tex_age[RADEON_NR_TEX_HEAPS];
	int ctx_owner;
	int pfState;	/* number of 3d windows (0,1,2ormore) */
	int pfCurrentPage;	/* which buffer is being displayed? */
	int crtc2_base;	/* CRTC2 frame offset */
	int tiling_enabled;	/* set by drm, read by 2d + 3d clients */
} drm_radeon_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the |
* defines in the Xserver file (xf86drmRadeon.h) |
* |
* KW: actually it's illegal to change any of this (backwards compatibility). |
*/ |
/* Radeon specific ioctls |
* The device specific ioctl range is 0x40 to 0x79. |
*/ |
#define DRM_RADEON_CP_INIT 0x00 |
#define DRM_RADEON_CP_START 0x01 |
#define DRM_RADEON_CP_STOP 0x02 |
#define DRM_RADEON_CP_RESET 0x03 |
#define DRM_RADEON_CP_IDLE 0x04 |
#define DRM_RADEON_RESET 0x05 |
#define DRM_RADEON_FULLSCREEN 0x06 |
#define DRM_RADEON_SWAP 0x07 |
#define DRM_RADEON_CLEAR 0x08 |
#define DRM_RADEON_VERTEX 0x09 |
#define DRM_RADEON_INDICES 0x0A |
#define DRM_RADEON_NOT_USED |
#define DRM_RADEON_STIPPLE 0x0C |
#define DRM_RADEON_INDIRECT 0x0D |
#define DRM_RADEON_TEXTURE 0x0E |
#define DRM_RADEON_VERTEX2 0x0F |
#define DRM_RADEON_CMDBUF 0x10 |
#define DRM_RADEON_GETPARAM 0x11 |
#define DRM_RADEON_FLIP 0x12 |
#define DRM_RADEON_ALLOC 0x13 |
#define DRM_RADEON_FREE 0x14 |
#define DRM_RADEON_INIT_HEAP 0x15 |
#define DRM_RADEON_IRQ_EMIT 0x16 |
#define DRM_RADEON_IRQ_WAIT 0x17 |
#define DRM_RADEON_CP_RESUME 0x18 |
#define DRM_RADEON_SETPARAM 0x19 |
#define DRM_RADEON_SURF_ALLOC 0x1a |
#define DRM_RADEON_SURF_FREE 0x1b |
/* KMS ioctl */ |
#define DRM_RADEON_GEM_INFO 0x1c |
#define DRM_RADEON_GEM_CREATE 0x1d |
#define DRM_RADEON_GEM_MMAP 0x1e |
#define DRM_RADEON_GEM_PREAD 0x21 |
#define DRM_RADEON_GEM_PWRITE 0x22 |
#define DRM_RADEON_GEM_SET_DOMAIN 0x23 |
#define DRM_RADEON_GEM_WAIT_IDLE 0x24 |
#define DRM_RADEON_CS 0x26 |
#define DRM_RADEON_INFO 0x27 |
#define DRM_RADEON_GEM_SET_TILING 0x28 |
#define DRM_RADEON_GEM_GET_TILING 0x29 |
#define DRM_RADEON_GEM_BUSY 0x2a |
#define DRM_RADEON_GEM_VA 0x2b |
#define DRM_RADEON_GEM_OP 0x2c |
/* Full ioctl request codes: pair each command number above with its argument
 * type and transfer direction (DRM_IO = none, DRM_IOW = in, DRM_IOWR = in/out). */
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET)
#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP)
#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP)
#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
/* KMS */
#define DRM_IOCTL_RADEON_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
#define DRM_IOCTL_RADEON_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
#define DRM_IOCTL_RADEON_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
#define DRM_IOCTL_RADEON_GEM_PREAD DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
#define DRM_IOCTL_RADEON_GEM_PWRITE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
#define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
#define DRM_IOCTL_RADEON_GEM_SET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
#define DRM_IOCTL_RADEON_GEM_GET_TILING DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
#define DRM_IOCTL_RADEON_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
#define DRM_IOCTL_RADEON_GEM_VA DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_VA, struct drm_radeon_gem_va)
#define DRM_IOCTL_RADEON_GEM_OP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_OP, struct drm_radeon_gem_op)
/* Argument for DRM_RADEON_CP_INIT: legacy (UMS) command-processor setup.
 * The func enum selects the chip-family-specific init path (or teardown).
 * This is frozen userspace ABI - the field layout must never change. */
typedef struct drm_radeon_init {
	enum {
		RADEON_INIT_CP = 0x01,
		RADEON_CLEANUP_CP = 0x02,
		RADEON_INIT_R200_CP = 0x03,
		RADEON_INIT_R300_CP = 0x04,
		RADEON_INIT_R600_CP = 0x05
	} func;
	unsigned long sarea_priv_offset;	/* offset of driver-private SAREA data */
	int is_pci;
	int cp_mode;
	int gart_size;
	int ring_size;
	int usec_timeout;
	unsigned int fb_bpp;
	unsigned int front_offset, front_pitch;
	unsigned int back_offset, back_pitch;
	unsigned int depth_bpp;
	unsigned int depth_offset, depth_pitch;
	/* Card-relative offsets of the fixed memory regions. */
	unsigned long fb_offset;
	unsigned long mmio_offset;
	unsigned long ring_offset;
	unsigned long ring_rptr_offset;
	unsigned long buffers_offset;
	unsigned long gart_textures_offset;
} drm_radeon_init_t;
/* DRM_RADEON_CP_STOP argument: optionally flush and/or idle the CP first. */
typedef struct drm_radeon_cp_stop {
	int flush;
	int idle;
} drm_radeon_cp_stop_t;

/* DRM_RADEON_FULLSCREEN argument (legacy page-flipping mode switch). */
typedef struct drm_radeon_fullscreen {
	enum {
		RADEON_INIT_FULLSCREEN = 0x01,
		RADEON_CLEANUP_FULLSCREEN = 0x02
	} func;
} drm_radeon_fullscreen_t;

/* Indices into drm_radeon_clear_rect.f/ui below. */
#define CLEAR_X1 0
#define CLEAR_Y1 1
#define CLEAR_X2 2
#define CLEAR_Y2 3
#define CLEAR_DEPTH 4

/* One clear rectangle; same storage viewed as floats or raw bits. */
typedef union drm_radeon_clear_rect {
	float f[5];
	unsigned int ui[5];
} drm_radeon_clear_rect_t;

/* DRM_RADEON_CLEAR argument. */
typedef struct drm_radeon_clear {
	unsigned int flags;
	unsigned int clear_color;
	unsigned int clear_depth;
	unsigned int color_mask;
	unsigned int depth_mask; /* misnamed field: should be stencil */
	drm_radeon_clear_rect_t __user *depth_boxes;
} drm_radeon_clear_t;

/* DRM_RADEON_VERTEX argument (v1.x vertex-buffer dispatch). */
typedef struct drm_radeon_vertex {
	int prim;
	int idx; /* Index of vertex buffer */
	int count; /* Number of vertices in buffer */
	int discard; /* Client finished with buffer? */
} drm_radeon_vertex_t;

/* DRM_RADEON_INDICES argument (indexed dispatch from a buffer range). */
typedef struct drm_radeon_indices {
	int prim;
	int idx;
	int start;
	int end;
	int discard; /* Client finished with buffer? */
} drm_radeon_indices_t;

/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
 * - allows multiple primitives and state changes in a single ioctl
 * - supports driver change to emit native primitives
 */
typedef struct drm_radeon_vertex2 {
	int idx; /* Index of vertex buffer */
	int discard; /* Client finished with buffer? */
	int nr_states;
	drm_radeon_state_t __user *state;
	int nr_prims;
	drm_radeon_prim_t __user *prim;
} drm_radeon_vertex2_t;

/* v1.3 - obsoletes drm_radeon_vertex2
 * - allows arbitrarily large cliprect list
 * - allows updating of tcl packet, vector and scalar state
 * - allows memory-efficient description of state updates
 * - allows state to be emitted without a primitive
 * (for clears, ctx switches)
 * - allows more than one dma buffer to be referenced per ioctl
 * - supports tcl driver
 * - may be extended in future versions with new cmd types, packets
 */
typedef struct drm_radeon_cmd_buffer {
	int bufsz;
	char __user *buf;
	int nbox;
	struct drm_clip_rect __user *boxes;
} drm_radeon_cmd_buffer_t;

/* One texture upload sub-image; referenced by drm_radeon_texture below. */
typedef struct drm_radeon_tex_image {
	unsigned int x, y; /* Blit coordinates */
	unsigned int width, height;
	const void __user *data;
} drm_radeon_tex_image_t;

/* DRM_RADEON_TEXTURE argument: destination surface plus the image to blit. */
typedef struct drm_radeon_texture {
	unsigned int offset;
	int pitch;
	int format;
	int width; /* Texture image coordinates */
	int height;
	drm_radeon_tex_image_t __user *image;
} drm_radeon_texture_t;

/* DRM_RADEON_STIPPLE argument: 32x32 polygon stipple pattern. */
typedef struct drm_radeon_stipple {
	unsigned int __user *mask;
} drm_radeon_stipple_t;

/* DRM_RADEON_INDIRECT argument: submit a range of an indirect buffer. */
typedef struct drm_radeon_indirect {
	int idx;
	int start;
	int end;
	int discard;
} drm_radeon_indirect_t;
/* enum for card type parameters */
#define RADEON_CARD_PCI 0
#define RADEON_CARD_AGP 1
#define RADEON_CARD_PCIE 2

/* 1.3: An ioctl to get parameters that aren't available to the 3d
 * client any other way.
 */
#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */
#define RADEON_PARAM_LAST_FRAME 2
#define RADEON_PARAM_LAST_DISPATCH 3
#define RADEON_PARAM_LAST_CLEAR 4
/* Added with DRM version 1.6. */
#define RADEON_PARAM_IRQ_NR 5
#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */
/* Added with DRM version 1.8. */
#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
#define RADEON_PARAM_STATUS_HANDLE 8
#define RADEON_PARAM_SAREA_HANDLE 9
#define RADEON_PARAM_GART_TEX_HANDLE 10
#define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
#define RADEON_PARAM_DEVICE_ID 16
#define RADEON_PARAM_NUM_Z_PIPES 17 /* num Z pipes */

/* DRM_RADEON_GETPARAM argument: param selects one RADEON_PARAM_* above,
 * the result is written through value. */
typedef struct drm_radeon_getparam {
	int param;
	void __user *value;
} drm_radeon_getparam_t;

/* 1.6: Set up a memory manager for regions of shared memory:
 */
#define RADEON_MEM_REGION_GART 1
#define RADEON_MEM_REGION_FB 2

typedef struct drm_radeon_mem_alloc {
	int region;
	int alignment;
	int size;
	int __user *region_offset; /* offset from start of fb or GART */
} drm_radeon_mem_alloc_t;

typedef struct drm_radeon_mem_free {
	int region;
	int region_offset;
} drm_radeon_mem_free_t;

typedef struct drm_radeon_mem_init_heap {
	int region;
	int size;
	int start;
} drm_radeon_mem_init_heap_t;

/* 1.6: Userspace can request & wait on irq's:
 */
typedef struct drm_radeon_irq_emit {
	int __user *irq_seq;
} drm_radeon_irq_emit_t;

typedef struct drm_radeon_irq_wait {
	int irq_seq;
} drm_radeon_irq_wait_t;

/* 1.10: Clients tell the DRM where they think the framebuffer is located in
 * the card's address space, via a new generic ioctl to set parameters
 */
typedef struct drm_radeon_setparam {
	unsigned int param;	/* one of RADEON_SETPARAM_* below */
	__s64 value;
} drm_radeon_setparam_t;

#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */

/* 1.14: Clients can allocate/free a surface
 */
typedef struct drm_radeon_surface_alloc {
	unsigned int address;
	unsigned int size;
	unsigned int flags;
} drm_radeon_surface_alloc_t;

typedef struct drm_radeon_surface_free {
	unsigned int address;
} drm_radeon_surface_free_t;

/* Bitmask values for the VBLANK_CRTC get/set parameters above. */
#define DRM_RADEON_VBLANK_CRTC1 1
#define DRM_RADEON_VBLANK_CRTC2 2
/*
 * Kernel modesetting world below.
 */
/* Memory domains a GEM buffer object can live in / migrate between. */
#define RADEON_GEM_DOMAIN_CPU 0x1
#define RADEON_GEM_DOMAIN_GTT 0x2
#define RADEON_GEM_DOMAIN_VRAM 0x4

/* DRM_RADEON_GEM_INFO result: sizes of the GART and VRAM pools. */
struct drm_radeon_gem_info {
	uint64_t gart_size;
	uint64_t vram_size;
	uint64_t vram_visible;	/* CPU-visible portion of VRAM */
};

/* Flags for drm_radeon_gem_create.flags. */
#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
#define RADEON_GEM_GTT_UC (1 << 1)
#define RADEON_GEM_GTT_WC (1 << 2)

/* DRM_RADEON_GEM_CREATE argument; handle is filled in on return. */
struct drm_radeon_gem_create {
	uint64_t size;
	uint64_t alignment;
	uint32_t handle;
	uint32_t initial_domain;	/* RADEON_GEM_DOMAIN_* */
	uint32_t flags;			/* RADEON_GEM_* flags above */
};

/* Tiling flags for the SET/GET_TILING ioctls below. Low bits are simple
 * booleans; the EG_* shift/mask pairs pack evergreen+ bank/tile-split
 * geometry into the same 32-bit word. */
#define RADEON_TILING_MACRO 0x1
#define RADEON_TILING_MICRO 0x2
#define RADEON_TILING_SWAP_16BIT 0x4
#define RADEON_TILING_SWAP_32BIT 0x8
/* this object requires a surface when mapped - i.e. front buffer */
#define RADEON_TILING_SURFACE 0x10
#define RADEON_TILING_MICRO_SQUARE 0x20
#define RADEON_TILING_EG_BANKW_SHIFT 8
#define RADEON_TILING_EG_BANKW_MASK 0xf
#define RADEON_TILING_EG_BANKH_SHIFT 12
#define RADEON_TILING_EG_BANKH_MASK 0xf
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf
#define RADEON_TILING_EG_TILE_SPLIT_SHIFT 24
#define RADEON_TILING_EG_TILE_SPLIT_MASK 0xf
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf

struct drm_radeon_gem_set_tiling {
	uint32_t handle;
	uint32_t tiling_flags;	/* RADEON_TILING_* */
	uint32_t pitch;
};

struct drm_radeon_gem_get_tiling {
	uint32_t handle;
	uint32_t tiling_flags;	/* RADEON_TILING_*, out */
	uint32_t pitch;
};

/* DRM_RADEON_GEM_MMAP: returns the fake offset (addr_ptr) to pass to mmap(). */
struct drm_radeon_gem_mmap {
	uint32_t handle;
	uint32_t pad;
	uint64_t offset;
	uint64_t size;
	uint64_t addr_ptr;
};

struct drm_radeon_gem_set_domain {
	uint32_t handle;
	uint32_t read_domains;	/* RADEON_GEM_DOMAIN_* mask */
	uint32_t write_domain;	/* single RADEON_GEM_DOMAIN_* */
};

struct drm_radeon_gem_wait_idle {
	uint32_t handle;
	uint32_t pad;
};

struct drm_radeon_gem_busy {
	uint32_t handle;
	uint32_t domain;	/* out: domain the BO is busy in */
};

struct drm_radeon_gem_pread {
	/** Handle for the object being read. */
	uint32_t handle;
	uint32_t pad;
	/** Offset into the object to read from */
	uint64_t offset;
	/** Length of data to read */
	uint64_t size;
	/** Pointer to write the data into. */
	/* void *, but pointers are not 32/64 compatible */
	uint64_t data_ptr;
};

struct drm_radeon_gem_pwrite {
	/** Handle for the object being written to. */
	uint32_t handle;
	uint32_t pad;
	/** Offset into the object to write to */
	uint64_t offset;
	/** Length of data to write */
	uint64_t size;
	/** Pointer to read the data from. */
	/* void *, but pointers are not 32/64 compatible */
	uint64_t data_ptr;
};

/* Sets or returns a value associated with a buffer. */
struct drm_radeon_gem_op {
	uint32_t handle; /* buffer */
	uint32_t op; /* RADEON_GEM_OP_* */
	uint64_t value; /* input or return value */
};

#define RADEON_GEM_OP_GET_INITIAL_DOMAIN 0
#define RADEON_GEM_OP_SET_INITIAL_DOMAIN 1

/* DRM_RADEON_GEM_VA operations, result codes and page-table flags. */
#define RADEON_VA_MAP 1
#define RADEON_VA_UNMAP 2
#define RADEON_VA_RESULT_OK 0
#define RADEON_VA_RESULT_ERROR 1
#define RADEON_VA_RESULT_VA_EXIST 2
#define RADEON_VM_PAGE_VALID (1 << 0)
#define RADEON_VM_PAGE_READABLE (1 << 1)
#define RADEON_VM_PAGE_WRITEABLE (1 << 2)
#define RADEON_VM_PAGE_SYSTEM (1 << 3)
#define RADEON_VM_PAGE_SNOOPED (1 << 4)

struct drm_radeon_gem_va {
	uint32_t handle;
	uint32_t operation;	/* RADEON_VA_MAP / RADEON_VA_UNMAP */
	uint32_t vm_id;
	uint32_t flags;		/* RADEON_VM_PAGE_* */
	uint64_t offset;	/* VM address to map the BO at */
};
/* Chunk types carried by a DRM_RADEON_CS submission. */
#define RADEON_CHUNK_ID_RELOCS 0x01
#define RADEON_CHUNK_ID_IB 0x02
#define RADEON_CHUNK_ID_FLAGS 0x03
#define RADEON_CHUNK_ID_CONST_IB 0x04

/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01
#define RADEON_CS_USE_VM 0x02
#define RADEON_CS_END_OF_FRAME 0x04 /* a hint from userspace which CS is the last one */
/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
#define RADEON_CS_RING_GFX 0
#define RADEON_CS_RING_COMPUTE 1
#define RADEON_CS_RING_DMA 2
#define RADEON_CS_RING_UVD 3
#define RADEON_CS_RING_VCE 4
/* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
/* 0 = normal, + = higher priority, - = lower priority */

struct drm_radeon_cs_chunk {
	uint32_t chunk_id;	/* RADEON_CHUNK_ID_* */
	uint32_t length_dw;
	uint64_t chunk_data;	/* user pointer to the chunk payload */
};

/* drm_radeon_cs_reloc.flags */
struct drm_radeon_cs_reloc {
	uint32_t handle;
	uint32_t read_domains;
	uint32_t write_domain;
	uint32_t flags;
};

/* DRM_RADEON_CS top-level argument. */
struct drm_radeon_cs {
	uint32_t num_chunks;
	uint32_t cs_id;
	/* this points to uint64_t * which point to cs chunks */
	uint64_t chunks;
	/* updates to the limits after this CS ioctl */
	uint64_t gart_limit;
	uint64_t vram_limit;
};

/* Request codes for DRM_RADEON_INFO. */
#define RADEON_INFO_DEVICE_ID 0x00
#define RADEON_INFO_NUM_GB_PIPES 0x01
#define RADEON_INFO_NUM_Z_PIPES 0x02
#define RADEON_INFO_ACCEL_WORKING 0x03
#define RADEON_INFO_CRTC_FROM_ID 0x04
#define RADEON_INFO_ACCEL_WORKING2 0x05
#define RADEON_INFO_TILING_CONFIG 0x06
#define RADEON_INFO_WANT_HYPERZ 0x07
#define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */
#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */
#define RADEON_INFO_NUM_BACKENDS 0x0a /* DB/backends for r600+ - need for OQ */
#define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */
#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */
#define RADEON_INFO_BACKEND_MAP 0x0d /* pipe to backend map, needed by mesa */
/* virtual address start, va < start are reserved by the kernel */
#define RADEON_INFO_VA_START 0x0e
/* maximum size of ib using the virtual memory cs */
#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
/* max pipes - needed for compute shaders */
#define RADEON_INFO_MAX_PIPES 0x10
/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
#define RADEON_INFO_TIMESTAMP 0x11
/* max shader engines (SE) - needed for geometry shaders, etc. */
#define RADEON_INFO_MAX_SE 0x12
/* max SH per SE */
#define RADEON_INFO_MAX_SH_PER_SE 0x13
/* fast fb access is enabled */
#define RADEON_INFO_FASTFB_WORKING 0x14
/* query if a RADEON_CS_RING_* submission is supported */
#define RADEON_INFO_RING_WORKING 0x15
/* SI tile mode array */
#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
/* query if CP DMA is supported on the compute ring */
#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
/* CIK macrotile mode array */
#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18
/* query the number of render backends */
#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19
/* max engine clock - needed for OpenCL */
#define RADEON_INFO_MAX_SCLK 0x1a
/* version of VCE firmware */
#define RADEON_INFO_VCE_FW_VERSION 0x1b
/* version of VCE feedback */
#define RADEON_INFO_VCE_FB_VERSION 0x1c
#define RADEON_INFO_NUM_BYTES_MOVED 0x1d
#define RADEON_INFO_VRAM_USAGE 0x1e
#define RADEON_INFO_GTT_USAGE 0x1f
#define RADEON_INFO_ACTIVE_CU_COUNT 0x20

/* DRM_RADEON_INFO argument: request selects a RADEON_INFO_* above. */
struct drm_radeon_info {
	uint32_t request;
	uint32_t pad;
	uint64_t value;		/* user pointer / out value, depending on request */
};

/* Those correspond to the tile index to use, this is to explicitly state
 * the API that is implicitly defined by the tile mode array.
 */
#define SI_TILE_MODE_COLOR_LINEAR_ALIGNED 8
#define SI_TILE_MODE_COLOR_1D 13
#define SI_TILE_MODE_COLOR_1D_SCANOUT 9
#define SI_TILE_MODE_COLOR_2D_8BPP 14
#define SI_TILE_MODE_COLOR_2D_16BPP 15
#define SI_TILE_MODE_COLOR_2D_32BPP 16
#define SI_TILE_MODE_COLOR_2D_64BPP 17
#define SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP 11
#define SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP 12
#define SI_TILE_MODE_DEPTH_STENCIL_1D 4
#define SI_TILE_MODE_DEPTH_STENCIL_2D 0
/* NOTE: 2AA and 4AA deliberately share tile index 3. */
#define SI_TILE_MODE_DEPTH_STENCIL_2D_2AA 3
#define SI_TILE_MODE_DEPTH_STENCIL_2D_4AA 3
#define SI_TILE_MODE_DEPTH_STENCIL_2D_8AA 2
#define CIK_TILE_MODE_DEPTH_STENCIL_1D 5
#endif
/drivers/include/linux/uapi/drm/vmwgfx_drm.h |
---|
87,8 → 87,18 |
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7 |
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8 |
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 |
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10 |
/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 *
 * @DRM_VMW_HANDLE_LEGACY: Traditional device-local surface handle.
 * @DRM_VMW_HANDLE_PRIME: Prime (cross-device shareable) handle.
 */
enum drm_vmw_handle_type {
	DRM_VMW_HANDLE_LEGACY = 0,
	DRM_VMW_HANDLE_PRIME = 1
};
/** |
* struct drm_vmw_getparam_arg |
* |
* @value: Returned value. //Out |
176,6 → 186,7 |
* struct drm_wmv_surface_arg |
* |
* @sid: Surface id of created surface or surface to destroy or reference. |
* @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl. |
* |
* Output data from the DRM_VMW_CREATE_SURFACE Ioctl. |
* Input argument to the DRM_VMW_UNREF_SURFACE Ioctl. |
184,7 → 195,7 |
struct drm_vmw_surface_arg {
	int32_t sid;			/* surface id (out for create, in otherwise) */
	uint32_t pad64;			/* explicit padding for 64-bit alignment */
	enum drm_vmw_handle_type handle_type;	/* for DRM_VMW_REF_SURFACE */
};
/** |
/drivers/include/linux/wait.h |
---|
109,9 → 109,15 |
DestroyEvent(__wait.evnt); \ |
} while (0) |
/* Port stub: degrades the interruptible wait to a plain wait_event() and
 * always reports success (0) - signal interruption (-ERESTARTSYS) is never
 * returned here.
 * NOTE(review): `condition` is evaluated twice, so it must be free of side
 * effects - confirm all call sites. */
#define wait_event_interruptible(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		wait_event(wq, condition); \
	__ret; \
})
static inline |
void wake_up_all(wait_queue_head_t *q) |
{ |
/drivers/include/linux/ww_mutex.h |
---|
0,0 → 1,381 |
/* |
* Wound/Wait Mutexes: blocking mutual exclusion locks with deadlock avoidance |
* |
* Original mutex implementation started by Ingo Molnar: |
* |
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
* |
* Wound/wait implementation: |
* Copyright (C) 2013 Canonical Ltd. |
* |
* This file contains the main data structure and API definitions. |
*/ |
#ifndef __LINUX_WW_MUTEX_H
#define __LINUX_WW_MUTEX_H

#include <linux/mutex.h>
#include <syscall.h>

/* Port shim: stand-in for the kernel's `current` task pointer.
 * NOTE(review): GetPid() yields a numeric id, not a task_struct - it is only
 * safe as an opaque owner cookie; confirm nothing dereferences ctx->task. */
#define current (void*)GetPid()

/* Identity of a class of w/w mutexes.  The shared stamp counter provides the
 * global acquisition ordering used for deadlock avoidance; the key/name pairs
 * feed lockdep when it is enabled. */
struct ww_class {
	atomic_long_t stamp;
	struct lock_class_key acquire_key;
	struct lock_class_key mutex_key;
	const char *acquire_name;
	const char *mutex_name;
};
/* Per-task acquire context: ties together all ww_mutex acquisitions of one
 * transaction so that conflicts between tasks can be resolved by comparing
 * stamps (lower stamp = older = wins). */
struct ww_acquire_ctx {
	struct task_struct *task;	/* owning task (see `current` shim above) */
	unsigned long stamp;		/* global age, taken from ww_class->stamp */
	unsigned acquired;		/* number of mutexes held via this ctx */
#ifdef CONFIG_DEBUG_MUTEXES
	unsigned done_acquire;		/* set once ww_acquire_done() was called */
	struct ww_class *ww_class;
	struct ww_mutex *contending_lock;	/* lock we were wounded on */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned deadlock_inject_interval;
	unsigned deadlock_inject_countdown;
#endif
};

/* A wound/wait mutex: a plain mutex plus the acquire context (if any) of the
 * current holder. */
struct ww_mutex {
	struct mutex base;
	struct ww_acquire_ctx *ctx;
#ifdef CONFIG_DEBUG_MUTEXES
	struct ww_class *ww_class;
#endif
};
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class) \
		, .ww_class = &ww_class
#else
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, ww_class)
#endif

/* Static initializer for a ww_class (zero stamp, stringified names). */
#define __WW_CLASS_INITIALIZER(ww_class) \
		{ .stamp = ATOMIC_LONG_INIT(0) \
		, .acquire_name = #ww_class "_acquire" \
		, .mutex_name = #ww_class "_mutex" }

/* Static initializer for a ww_mutex.
 * Fix: the previous expansion contained a stray mid-line backslash
 * ("{ \__MUTEX_INITIALIZER(...)"), which is invalid C as soon as
 * DEFINE_WW_MUTEX() is actually expanded (the same latent bug old upstream
 * ww_mutex.h carried).  The backslash is removed; the argument passed to
 * __MUTEX_INITIALIZER is unchanged. */
#define __WW_MUTEX_INITIALIZER(lockname, class) \
		{ .base = { __MUTEX_INITIALIZER(lockname) } \
		__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }

#define DEFINE_WW_CLASS(classname) \
	struct ww_class classname = __WW_CLASS_INITIALIZER(classname)

#define DEFINE_WW_MUTEX(mutexname, ww_class) \
	struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
/**
 * ww_mutex_init - initialize the w/w mutex
 * @lock: the mutex to be initialized
 * @ww_class: the w/w class the mutex should belong to
 *
 * Initialize the w/w mutex to unlocked state and associate it with the given
 * class.
 *
 * It is not allowed to initialize an already locked mutex.
 */
static inline void ww_mutex_init(struct ww_mutex *lock,
				 struct ww_class *ww_class)
{
	/* Port: MutexInit is the KolibriOS mutex primitive standing in for
	 * the kernel's __mutex_init(). */
	MutexInit(&lock->base);
	lock->ctx = NULL;	/* no holder context yet */
#ifdef CONFIG_DEBUG_MUTEXES
	lock->ww_class = ww_class;
#endif
}
/**
 * ww_acquire_init - initialize a w/w acquire context
 * @ctx: w/w acquire context to initialize
 * @ww_class: w/w class of the context
 *
 * Initializes an context to acquire multiple mutexes of the given w/w class.
 *
 * Context-based w/w mutex acquiring can be done in any order whatsoever within
 * a given lock class. Deadlocks will be detected and handled with the
 * wait/wound logic.
 *
 * Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
 * result in undetected deadlocks and is so forbidden. Mixing different contexts
 * for the same w/w class when acquiring mutexes can also result in undetected
 * deadlocks, and is hence also forbidden. Both types of abuse will be caught by
 * enabling CONFIG_PROVE_LOCKING.
 *
 * Nesting of acquire contexts for _different_ w/w classes is possible, subject
 * to the usual locking rules between different lock classes.
 *
 * An acquire context must be released with ww_acquire_fini by the same task
 * before the memory is freed. It is recommended to allocate the context itself
 * on the stack.
 */
static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
				   struct ww_class *ww_class)
{
	ctx->task = current;
	/* Atomically take the next stamp: defines this context's age relative
	 * to every other context of the same class. */
	ctx->stamp = atomic_long_inc_return(&ww_class->stamp);
	ctx->acquired = 0;
#ifdef CONFIG_DEBUG_MUTEXES
	ctx->ww_class = ww_class;
	ctx->done_acquire = 0;
	ctx->contending_lock = NULL;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
	lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
			 &ww_class->acquire_key, 0);
	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	ctx->deadlock_inject_interval = 1;
	/* Pseudo-random per-context start value derived from the stamp. */
	ctx->deadlock_inject_countdown = ctx->stamp & 0xf;
#endif
}
/**
 * ww_acquire_done - marks the end of the acquire phase
 * @ctx: the acquire context
 *
 * Marks the end of the acquire phase, any further w/w mutex lock calls using
 * this context are forbidden.
 *
 * Calling this function is optional, it is just useful to document w/w mutex
 * code and clearly designated the acquire phase from actually using the locked
 * data structures.
 */
static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
{
	/* Pure debug bookkeeping; no-op unless CONFIG_DEBUG_MUTEXES. */
#ifdef CONFIG_DEBUG_MUTEXES
	lockdep_assert_held(ctx);

	DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
	ctx->done_acquire = 1;
#endif
}

/**
 * ww_acquire_fini - releases a w/w acquire context
 * @ctx: the acquire context to free
 *
 * Releases a w/w acquire context. This must be called _after_ all acquired w/w
 * mutexes have been released with ww_mutex_unlock.
 */
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	mutex_release(&ctx->dep_map, 0, _THIS_IP_);

	DEBUG_LOCKS_WARN_ON(ctx->acquired);
	if (!config_enabled(CONFIG_PROVE_LOCKING))
		/*
		 * lockdep will normally handle this,
		 * but fail without anyway
		 */
		ctx->done_acquire = 1;

	if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
		/* ensure ww_acquire_fini will still fail if called twice */
		ctx->acquired = ~0U;
#endif
}
/* Slowpath implementations (defined out of line); used whenever an acquire
 * context is supplied to the inline wrappers below. */
extern int __must_check __ww_mutex_lock(struct ww_mutex *lock,
					struct ww_acquire_ctx *ctx);
extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
						      struct ww_acquire_ctx *ctx);
/** |
* ww_mutex_lock - acquire the w/w mutex |
* @lock: the mutex to be acquired |
* @ctx: w/w acquire context, or NULL to acquire only a single lock. |
* |
* Lock the w/w mutex exclusively for this task. |
* |
* Deadlocks within a given w/w class of locks are detected and handled with the |
* wait/wound algorithm. If the lock isn't immediately avaiable this function |
* will either sleep until it is (wait case). Or it selects the current context |
* for backing off by returning -EDEADLK (wound case). Trying to acquire the |
* same lock with the same context twice is also detected and signalled by |
* returning -EALREADY. Returns 0 if the mutex was successfully acquired. |
* |
* In the wound case the caller must release all currently held w/w mutexes for |
* the given context and then wait for this contending lock to be available by |
* calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this |
* lock and proceed with trying to acquire further w/w mutexes (e.g. when |
* scanning through lru lists trying to free resources). |
* |
* The mutex must later on be released by the same task that |
* acquired it. The task may not exit without first unlocking the mutex. Also, |
* kernel memory where the mutex resides must not be freed with the mutex still |
* locked. The mutex must first be initialized (or statically defined) before it |
* can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be |
* of the same w/w lock class as was used to initialize the acquire context. |
* |
* A mutex acquired with this function must be released with ww_mutex_unlock. |
*/ |
static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
{ |
if (ctx) |
return __ww_mutex_lock(lock, ctx); |
mutex_lock(&lock->base); |
return 0; |
} |
/** |
* ww_mutex_lock_interruptible - acquire the w/w mutex, interruptible |
* @lock: the mutex to be acquired |
* @ctx: w/w acquire context |
* |
* Lock the w/w mutex exclusively for this task. |
* |
* Deadlocks within a given w/w class of locks are detected and handled with the |
* wait/wound algorithm. If the lock isn't immediately avaiable this function |
* will either sleep until it is (wait case). Or it selects the current context |
* for backing off by returning -EDEADLK (wound case). Trying to acquire the |
* same lock with the same context twice is also detected and signalled by |
* returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a |
* signal arrives while waiting for the lock then this function returns -EINTR. |
* |
* In the wound case the caller must release all currently held w/w mutexes for |
* the given context and then wait for this contending lock to be available by |
* calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to |
* not acquire this lock and proceed with trying to acquire further w/w mutexes |
* (e.g. when scanning through lru lists trying to free resources). |
* |
* The mutex must later on be released by the same task that |
* acquired it. The task may not exit without first unlocking the mutex. Also, |
* kernel memory where the mutex resides must not be freed with the mutex still |
* locked. The mutex must first be initialized (or statically defined) before it |
* can be locked. memset()-ing the mutex to 0 is not allowed. The mutex must be |
* of the same w/w lock class as was used to initialize the acquire context. |
* |
* A mutex acquired with this function must be released with ww_mutex_unlock. |
*/ |
static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock, |
struct ww_acquire_ctx *ctx) |
{ |
if (ctx) |
return __ww_mutex_lock_interruptible(lock, ctx); |
else |
return mutex_lock_interruptible(&lock->base); |
} |
/**
 * ww_mutex_lock_slow - slowpath acquiring of the w/w mutex
 * @lock: the mutex to be acquired
 * @ctx: w/w acquire context
 *
 * Blocks until the contended mutex becomes available after a wound case.
 * The caller must already have released every other w/w mutex held with
 * @ctx, and may only call this on the lock it was wounded on; afterwards it
 * can (re)acquire the remaining mutexes with ww_mutex_lock (where -EALREADY
 * protects against locking this one twice).
 *
 * Functionally this is just ww_mutex_lock(); the separate name exists to
 * make slowpath call sites self-documenting.
 */
static inline void
ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
#endif
	/* Result deliberately discarded: the wait case always succeeds. */
	(void)ww_mutex_lock(lock, ctx);
}
/** |
* ww_mutex_lock_slow_interruptible - slowpath acquiring of the w/w mutex, interruptible |
* @lock: the mutex to be acquired |
* @ctx: w/w acquire context |
* |
* Acquires a w/w mutex with the given context after a wound case. This function |
* will sleep until the lock becomes available and returns 0 when the lock has |
* been acquired. If a signal arrives while waiting for the lock then this |
* function returns -EINTR. |
* |
* The caller must have released all w/w mutexes already acquired with the |
* context and then call this function on the contended lock. |
* |
* Afterwards the caller may continue to (re)acquire the other w/w mutexes it |
* needs with ww_mutex_lock. Note that the -EALREADY return code from |
* ww_mutex_lock can be used to avoid locking this contended mutex twice. |
* |
* It is forbidden to call this function with any other w/w mutexes associated |
* with the given context held. It is forbidden to call this on anything else |
* than the contending mutex. |
* |
* Note that the slowpath lock acquiring can also be done by calling |
* ww_mutex_lock_interruptible directly. This function here is simply to help |
* w/w mutex locking code readability by clearly denoting the slowpath. |
*/ |
static inline int __must_check |
ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, |
struct ww_acquire_ctx *ctx) |
{ |
#ifdef CONFIG_DEBUG_MUTEXES |
DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); |
#endif |
return ww_mutex_lock_interruptible(lock, ctx); |
} |
extern void ww_mutex_unlock(struct ww_mutex *lock); |
/** |
* ww_mutex_trylock - tries to acquire the w/w mutex without acquire context |
* @lock: mutex to lock |
* |
* Trylocks a mutex without acquire context, so no deadlock detection is |
* possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise. |
*/ |
static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
{
	/* Context-free trylock: defers directly to the underlying mutex. */
	int acquired = mutex_trylock(&lock->base);

	return acquired;
}
/**
* ww_mutex_destroy - mark a w/w mutex unusable |
* @lock: the mutex to be destroyed |
* |
* This function marks the mutex uninitialized, and any subsequent |
* use of the mutex is forbidden. The mutex must not be locked when |
* this function is called. |
*/ |
static inline void ww_mutex_destroy(struct ww_mutex *lock) |
{ |
mutex_destroy(&lock->base); |
} |
/** |
* ww_mutex_is_locked - is the w/w mutex locked |
* @lock: the mutex to be queried |
* |
* Returns 1 if the mutex is locked, 0 if unlocked. |
*/ |
static inline bool ww_mutex_is_locked(struct ww_mutex *lock) |
{ |
return mutex_is_locked(&lock->base); |
} |
#endif |
/drivers/include/video/mipi_display.h |
---|
0,0 → 1,130 |
/* |
* Defines for Mobile Industry Processor Interface (MIPI(R)) |
* Display Working Group standards: DSI, DCS, DBI, DPI |
* |
* Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> |
* Copyright (C) 2006 Nokia Corporation |
* Author: Imre Deak <imre.deak@nokia.com> |
* |
* This program is free software; you can redistribute it and/or modify |
* it under the terms of the GNU General Public License version 2 as |
* published by the Free Software Foundation. |
*/ |
#ifndef MIPI_DISPLAY_H |
#define MIPI_DISPLAY_H |
/* MIPI DSI Processor-to-Peripheral transaction types */
/*
 * Data-type codes placed in the DSI packet header for host-to-peripheral
 * traffic. NOTE(review): values appear grouped by low nibble (e.g. 0xN1 sync
 * events, 0xN3 generic short writes, 0xN9 long packets) — presumably per the
 * MIPI DSI specification's data-type encoding; confirm against the spec.
 */
enum {
/* Video-mode sync event markers */
MIPI_DSI_V_SYNC_START = 0x01,
MIPI_DSI_V_SYNC_END = 0x11,
MIPI_DSI_H_SYNC_START = 0x21,
MIPI_DSI_H_SYNC_END = 0x31,
/* Peripheral power/color-mode control */
MIPI_DSI_COLOR_MODE_OFF = 0x02,
MIPI_DSI_COLOR_MODE_ON = 0x12,
MIPI_DSI_SHUTDOWN_PERIPHERAL = 0x22,
MIPI_DSI_TURN_ON_PERIPHERAL = 0x32,
/* Generic (non-DCS) short writes, by parameter count */
MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM = 0x03,
MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM = 0x13,
MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM = 0x23,
/* Generic read requests, by parameter count */
MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM = 0x04,
MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM = 0x14,
MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM = 0x24,
/* DCS command short write/read */
MIPI_DSI_DCS_SHORT_WRITE = 0x05,
MIPI_DSI_DCS_SHORT_WRITE_PARAM = 0x15,
MIPI_DSI_DCS_READ = 0x06,
MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE = 0x37,
/* Long packets: filler, blanking, and long writes */
MIPI_DSI_END_OF_TRANSMISSION = 0x08,
MIPI_DSI_NULL_PACKET = 0x09,
MIPI_DSI_BLANKING_PACKET = 0x19,
MIPI_DSI_GENERIC_LONG_WRITE = 0x29,
MIPI_DSI_DCS_LONG_WRITE = 0x39,
/* Video-mode pixel stream formats */
MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20 = 0x0c,
MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24 = 0x1c,
MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16 = 0x2c,
MIPI_DSI_PACKED_PIXEL_STREAM_30 = 0x0d,
MIPI_DSI_PACKED_PIXEL_STREAM_36 = 0x1d,
MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12 = 0x3d,
MIPI_DSI_PACKED_PIXEL_STREAM_16 = 0x0e,
MIPI_DSI_PACKED_PIXEL_STREAM_18 = 0x1e,
MIPI_DSI_PIXEL_STREAM_3BYTE_18 = 0x2e,
MIPI_DSI_PACKED_PIXEL_STREAM_24 = 0x3e,
};
/* MIPI DSI Peripheral-to-Processor transaction types */
/*
 * Data-type codes for responses travelling from the peripheral back to the
 * host: error reports, read responses (generic and DCS, short and long), and
 * end-of-transmission.
 */
enum {
MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT = 0x02,
MIPI_DSI_RX_END_OF_TRANSMISSION = 0x08,
MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE = 0x11,
MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE = 0x12,
MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE = 0x1a,
MIPI_DSI_RX_DCS_LONG_READ_RESPONSE = 0x1c,
MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE = 0x21,
MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE = 0x22,
};
/* MIPI DCS commands */
/*
 * Display Command Set opcodes sent to the panel (typically as the first byte
 * of a DCS short/long write, per the transaction types above).
 */
enum {
MIPI_DCS_NOP = 0x00,
MIPI_DCS_SOFT_RESET = 0x01,
/* Status / identification reads */
MIPI_DCS_GET_DISPLAY_ID = 0x04,
MIPI_DCS_GET_RED_CHANNEL = 0x06,
MIPI_DCS_GET_GREEN_CHANNEL = 0x07,
MIPI_DCS_GET_BLUE_CHANNEL = 0x08,
MIPI_DCS_GET_DISPLAY_STATUS = 0x09,
MIPI_DCS_GET_POWER_MODE = 0x0A,
MIPI_DCS_GET_ADDRESS_MODE = 0x0B,
MIPI_DCS_GET_PIXEL_FORMAT = 0x0C,
MIPI_DCS_GET_DISPLAY_MODE = 0x0D,
MIPI_DCS_GET_SIGNAL_MODE = 0x0E,
MIPI_DCS_GET_DIAGNOSTIC_RESULT = 0x0F,
/* Power and display mode transitions */
MIPI_DCS_ENTER_SLEEP_MODE = 0x10,
MIPI_DCS_EXIT_SLEEP_MODE = 0x11,
MIPI_DCS_ENTER_PARTIAL_MODE = 0x12,
MIPI_DCS_ENTER_NORMAL_MODE = 0x13,
MIPI_DCS_EXIT_INVERT_MODE = 0x20,
MIPI_DCS_ENTER_INVERT_MODE = 0x21,
MIPI_DCS_SET_GAMMA_CURVE = 0x26,
MIPI_DCS_SET_DISPLAY_OFF = 0x28,
MIPI_DCS_SET_DISPLAY_ON = 0x29,
/* Frame-memory addressing and access */
MIPI_DCS_SET_COLUMN_ADDRESS = 0x2A,
MIPI_DCS_SET_PAGE_ADDRESS = 0x2B,
MIPI_DCS_WRITE_MEMORY_START = 0x2C,
MIPI_DCS_WRITE_LUT = 0x2D,
MIPI_DCS_READ_MEMORY_START = 0x2E,
/* Partial area, scrolling, and tearing-effect configuration */
MIPI_DCS_SET_PARTIAL_AREA = 0x30,
MIPI_DCS_SET_SCROLL_AREA = 0x33,
MIPI_DCS_SET_TEAR_OFF = 0x34,
MIPI_DCS_SET_TEAR_ON = 0x35,
MIPI_DCS_SET_ADDRESS_MODE = 0x36,
MIPI_DCS_SET_SCROLL_START = 0x37,
MIPI_DCS_EXIT_IDLE_MODE = 0x38,
MIPI_DCS_ENTER_IDLE_MODE = 0x39,
MIPI_DCS_SET_PIXEL_FORMAT = 0x3A,
MIPI_DCS_WRITE_MEMORY_CONTINUE = 0x3C,
MIPI_DCS_READ_MEMORY_CONTINUE = 0x3E,
MIPI_DCS_SET_TEAR_SCANLINE = 0x44,
MIPI_DCS_GET_SCANLINE = 0x45,
/* Device Descriptor Block reads */
MIPI_DCS_READ_DDB_START = 0xA1,
MIPI_DCS_READ_DDB_CONTINUE = 0xA8,
};
/* MIPI DCS pixel formats */
/*
 * 3-bit pixel-format codes, presumably the values written with
 * MIPI_DCS_SET_PIXEL_FORMAT (0x3A) — the macro names suggest each code
 * selects the panel's bits-per-pixel; confirm against the DCS specification.
 */
#define MIPI_DCS_PIXEL_FMT_24BIT 7
#define MIPI_DCS_PIXEL_FMT_18BIT 6
#define MIPI_DCS_PIXEL_FMT_16BIT 5
#define MIPI_DCS_PIXEL_FMT_12BIT 3
#define MIPI_DCS_PIXEL_FMT_8BIT 2
#define MIPI_DCS_PIXEL_FMT_3BIT 1
#endif |