Subversion Repositories: Kolibri OS

Compare Revisions: Rev 3030 → Rev 3031

/drivers/ddk/Makefile
29,6 → 29,7
linux/dmapool.c \
linux/ctype.c \
linux/string.c \
linux/time.c \
malloc/malloc.c \
stdio/vsprintf.c \
string/_memmove.S \
/drivers/ddk/core.S
35,6 → 35,8
 
.global _MapIoMem
 
.global _MapPage
 
.global _MutexInit
.global _MutexLock
.global _MutexUnlock
95,6 → 97,8
 
.def _MapIoMem; .scl 2; .type 32; .endef
 
.def _MapPage; .scl 2; .type 32; .endef
 
.def _MutexInit; .scl 2; .type 32; .endef
.def _MutexLock; .scl 2; .type 32; .endef
.def _MutexUnlock; .scl 2; .type 32; .endef
155,6 → 159,8
 
_MapIoMem:
 
_MapPage:
 
_MutexInit:
_MutexLock:
_MutexUnlock:
219,6 → 225,7
.ascii " -export:KernelFree" # stdcall
 
.ascii " -export:MapIoMem" # stdcall
.ascii " -export:MapPage" # stdcall
 
.ascii " -export:MutexInit" # fastcall
.ascii " -export:MutexLock" # fastcall
/drivers/ddk/linux/time.c
0,0 → 1,148
#include <jiffies.h>
 
 
 
#define HZ_TO_MSEC_MUL32 0xA0000000
#define HZ_TO_MSEC_ADJ32 0x0
#define HZ_TO_MSEC_SHR32 28
#define HZ_TO_MSEC_MUL64 0xA000000000000000
#define HZ_TO_MSEC_ADJ64 0x0
#define HZ_TO_MSEC_SHR64 60
#define MSEC_TO_HZ_MUL32 0xCCCCCCCD
#define MSEC_TO_HZ_ADJ32 0x733333333
#define MSEC_TO_HZ_SHR32 35
#define MSEC_TO_HZ_MUL64 0xCCCCCCCCCCCCCCCD
#define MSEC_TO_HZ_ADJ64 0x73333333333333333
#define MSEC_TO_HZ_SHR64 67
#define HZ_TO_MSEC_NUM 10
#define HZ_TO_MSEC_DEN 1
#define MSEC_TO_HZ_NUM 1
#define MSEC_TO_HZ_DEN 10
 
#define HZ_TO_USEC_MUL32 0x9C400000
#define HZ_TO_USEC_ADJ32 0x0
#define HZ_TO_USEC_SHR32 18
#define HZ_TO_USEC_MUL64 0x9C40000000000000
#define HZ_TO_USEC_ADJ64 0x0
#define HZ_TO_USEC_SHR64 50
#define USEC_TO_HZ_MUL32 0xD1B71759
#define USEC_TO_HZ_ADJ32 0x1FFF2E48E8A7
#define USEC_TO_HZ_SHR32 45
#define USEC_TO_HZ_MUL64 0xD1B71758E219652C
#define USEC_TO_HZ_ADJ64 0x1FFF2E48E8A71DE69AD4
#define USEC_TO_HZ_SHR64 77
#define HZ_TO_USEC_NUM 10000
#define HZ_TO_USEC_DEN 1
#define USEC_TO_HZ_NUM 1
#define USEC_TO_HZ_DEN 10000
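A quick worked check of the HZ_TO_MSEC/MSEC_TO_HZ pair above, assuming the table was generated for HZ = 100 (one jiffy = 10 ms); illustrative only, not part of the revision:

/*
 * Worked check (assuming HZ = 100):
 *
 *   HZ_TO_MSEC_MUL32 = 0xA0000000 = 10 * 2^28 and SHR32 = 28, so
 *     msecs = (j * 10 * 2^28) >> 28 = j * 10            (10 ms per jiffy)
 *
 *   MSEC_TO_HZ_MUL32 = 0xCCCCCCCD ~ 2^35 / 10 and SHR32 = 35, with
 *   ADJ32 = 0x733333333 ~ 9 * 2^35 / 10 acting as a round-up bias, so
 *     jiffies = (m * 2^35/10 + 9 * 2^35/10) >> 35 = ceil(m / 10)
 *   e.g. m = 1:  (0xCCCCCCCD + 0x733333333) >> 35 = 2^35 >> 35 = 1.
 */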
 
 
#define MSEC_PER_SEC 1000L
#define USEC_PER_MSEC 1000L
#define NSEC_PER_USEC 1000L
#define NSEC_PER_MSEC 1000000L
#define USEC_PER_SEC 1000000L
#define NSEC_PER_SEC 1000000000L
#define FSEC_PER_SEC 1000000000000000LL
 
 
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
# else
return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
# endif
#endif
}
 
unsigned int jiffies_to_usecs(const unsigned long j)
{
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
return (USEC_PER_SEC / HZ) * j;
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
 
 
/*
* When we convert to jiffies then we interpret incoming values
* the following way:
*
* - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
*
* - 'too large' values [that would result in larger than
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
*
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor
*
* We must also be careful about 32-bit overflows.
*/
unsigned long msecs_to_jiffies(const unsigned int m)
{
/*
* Negative value, means infinite timeout:
*/
if ((int)m < 0)
return MAX_JIFFY_OFFSET;
 
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
/*
* HZ is equal to or smaller than 1000, and 1000 is a nice
* round multiple of HZ, divide with the factor between them,
* but round upwards:
*/
return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
/*
* HZ is larger than 1000, and HZ is a nice round multiple of
* 1000 - simply multiply with the factor between them.
*
* But first make sure the multiplication result cannot
* overflow:
*/
if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
 
return m * (HZ / MSEC_PER_SEC);
#else
/*
* Generic case - multiply, round and divide. But first
* check that if we are doing a net multiplication, that
* we wouldn't overflow:
*/
if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
 
return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
>> MSEC_TO_HZ_SHR32;
#endif
}
 
unsigned long usecs_to_jiffies(const unsigned int u)
{
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
return u * (HZ / USEC_PER_SEC);
#else
return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
>> USEC_TO_HZ_SHR32;
#endif
}
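A minimal usage sketch of the two conversion directions above, assuming HZ = 100 so the exact-divisor branches are taken; timeout_for() is a hypothetical helper, not part of the revision:

/* Sketch only: round a millisecond delay up to whole jiffies (HZ = 100 assumed). */
static unsigned long timeout_for(unsigned int delay_ms)
{
    unsigned long j = msecs_to_jiffies(delay_ms);

    /* With HZ = 100: msecs_to_jiffies(25) == 3, and converting back gives
     * jiffies_to_msecs(3) == 30, so the wait is never shorter than requested. */
    return j;
}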
 
/drivers/ddk/stdio/vsprintf.c
24,6 → 24,7
#include <linux/kernel.h>
#include <errno-base.h>
#include <linux/ioport.h>
#include <linux/export.h>
 
#include <asm/div64.h>
 
/drivers/include/errno-base.h
File deleted
/drivers/include/ddk.h
4,7 → 4,10
#define __DDK_H__
 
#include <kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <mutex.h>
#include <linux/pci.h>
 
 
#define OS_BASE 0x80000000
56,7 → 59,24
u32_t drvEntry(int, char *)__asm__("_drvEntry");
 
 
#define __WARN() dbgprintf(__FILE__, __LINE__)
 
#ifndef WARN_ON
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN(); \
unlikely(__ret_warn_on); \
})
#endif
 
 
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
// if (size != 0 && n > SIZE_MAX / size)
// return NULL;
return kmalloc(n * size, flags);
}
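A hedged usage sketch for the two helpers added above; alloc_table() and its callers are hypothetical, and the gfp flags are simply passed through:

/* Sketch only: allocate a table of fixed-size entries and warn on failure. */
static void *alloc_table(size_t count, size_t entry_size, gfp_t flags)
{
    void *buf = kmalloc_array(count, entry_size, flags);

    /* WARN_ON() logs the file and line through __WARN() and evaluates to the
     * condition, so it can sit directly inside the if. */
    if (WARN_ON(buf == NULL))
        return NULL;

    return buf;
}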
 
 
#endif /* DDK_H */
/drivers/include/drm/drmP.hh
File deleted
/drivers/include/drm/drmP.h
36,15 → 36,22
#define _DRM_P_H_
 
#ifdef __KERNEL__
#ifdef __alpha__
/* add include of current.h so that "current" is defined
* before static inline funcs in wait.h. Doing this so we
* can build the DRM (part of PI DRI). 4/21/2000 S + B */
#include <asm/current.h>
#endif /* __alpha__ */
 
#include <syscall.h>
 
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/bug.h>
 
//#include <linux/miscdevice.h>
//#include <linux/fs.h>
72,25 → 79,15
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
 
struct module;
 
 
 
#include <drm_edid.h>
#include <drm_crtc.h>
 
 
struct drm_file;
struct drm_device;
 
//#include "drm_os_linux.h"
#include "drm_hashtab.h"
#include "drm_mm.h"
//#include <drm/drm_os_linux.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_mm.h>
 
#define DRM_UT_CORE 0x01
#define DRM_UT_DRIVER 0x02
#define DRM_UT_KMS 0x04
#define DRM_UT_MODE 0x08
 
#define KHZ2PICOS(a) (1000000000UL/(a))
 
/* get_scanout_position() return flags */
99,45 → 96,51
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
 
 
extern void drm_ut_debug_printk(unsigned int request_level,
 
#define DRM_UT_CORE 0x01
#define DRM_UT_DRIVER 0x02
#define DRM_UT_KMS 0x04
#define DRM_UT_PRIME 0x08
/*
* Three debug levels are defined.
* drm_core, drm_driver, drm_kms
* drm_core level can be used in the generic drm code. For example:
* drm_ioctl, drm_mm, drm_memory
* The macro definition of DRM_DEBUG is used.
* DRM_DEBUG(fmt, args...)
* The debug info by using the DRM_DEBUG can be obtained by adding
* the boot option of "drm.debug=1".
*
* drm_driver level can be used in the specific drm driver. It is used
* to add the debug info related with the drm driver. For example:
* i915_drv, i915_dma, i915_gem, radeon_drv,
* The macro definition of DRM_DEBUG_DRIVER can be used.
* DRM_DEBUG_DRIVER(fmt, args...)
* The debug info by using the DRM_DEBUG_DRIVER can be obtained by
* adding the boot option of "drm.debug=0x02"
*
* drm_kms level can be used in the KMS code related with specific drm driver.
* It is used to add the debug info related with KMS mode. For example:
* the connector/crtc ,
* The macro definition of DRM_DEBUG_KMS can be used.
* DRM_DEBUG_KMS(fmt, args...)
* The debug info by using the DRM_DEBUG_KMS can be obtained by
* adding the boot option of "drm.debug=0x04"
*
* If we add the boot option of "drm.debug=0x06", we can get the debug info by
* using the DRM_DEBUG_KMS and DRM_DEBUG_DRIVER.
* If we add the boot option of "drm.debug=0x05", we can get the debug info by
* using the DRM_DEBUG_KMS and DRM_DEBUG.
*/
 
extern __printf(4, 5)
void drm_ut_debug_printk(unsigned int request_level,
const char *prefix,
const char *function_name,
const char *format, ...);
extern __printf(2, 3)
int drm_err(const char *func, const char *format, ...);
 
#define DRM_DEBUG_MODE(prefix, fmt, args...) \
do { \
dbgprintf("drm debug: %s" fmt, \
__func__, ##args); \
} while (0)
 
#define DRM_DEBUG(fmt, args...) \
do { \
printk("[" DRM_NAME ":%s] " fmt , __func__ , ##args); \
} while(0)
 
#define DRM_DEBUG_KMS(fmt, args...) \
do { \
printk("[" DRM_NAME ":%s] " fmt , __func__ , ##args); \
} while(0)
 
#define DRM_DEBUG_DRIVER(fmt, args...) \
do { \
printk("[" DRM_NAME ":%s] " fmt , __func__ , ##args); \
} while (0)
 
#define DRM_LOG_KMS(fmt, args...) \
do { \
printk("[" DRM_NAME "]" fmt, ##args); \
} while (0)
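A hedged sketch of typical calls to the debug macros defined above (the levels are described in the comment earlier in this hunk); example_check_mode() and its arguments are hypothetical:

/* Sketch only: core-, KMS- and error-level messages from driver code. */
static void example_check_mode(int hdisplay, int vdisplay)
{
    DRM_DEBUG("validating mode\n");                           /* drm_core level */
    DRM_DEBUG_KMS("requested %dx%d\n", hdisplay, vdisplay);   /* drm_kms level */

    if (hdisplay <= 0 || vdisplay <= 0)
        DRM_ERROR("invalid mode %dx%d\n", hdisplay, vdisplay);
}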
 
static inline int drm_sysfs_connector_add(struct drm_connector *connector)
{ return 0; };
 
static inline void drm_sysfs_connector_remove(struct drm_connector *connector)
{ };
 
#if 0
 
/***********************************************************************/
/** \name DRM template customization defaults */
/*@{*/
157,6 → 160,7
#define DRIVER_IRQ_VBL2 0x800
#define DRIVER_GEM 0x1000
#define DRIVER_MODESET 0x2000
#define DRIVER_PRIME 0x4000
 
#define DRIVER_BUS_PCI 0x1
#define DRIVER_BUS_PLATFORM 0x2
193,22 → 197,12
* \param fmt printf() like format string.
* \param arg arguments
*/
#define DRM_ERROR(fmt, arg...) \
printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)
#define DRM_ERROR(fmt, ...) \
drm_err(__func__, fmt, ##__VA_ARGS__)
 
/**
* Memory error output.
*
* \param area memory area where the error occurred.
* \param fmt printf() like format string.
* \param arg arguments
*/
#define DRM_MEM_ERROR(area, fmt, arg...) \
printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \
drm_mem_stats[area].name , ##arg)
#define DRM_INFO(fmt, ...) \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
 
#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)
 
/**
* Debug output.
*
216,45 → 210,43
* \param arg arguments
*/
#if DRM_DEBUG_CODE
#define DRM_DEBUG(fmt, args...) \
do { \
drm_ut_debug_printk(DRM_UT_CORE, DRM_NAME, \
__func__, fmt, ##args); \
#define DRM_DEBUG(fmt, ...) \
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
 
#define DRM_DEBUG_DRIVER(fmt, args...) \
#define DRM_DEBUG_DRIVER(fmt, ...) \
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_DEBUG_KMS(fmt, ...) \
do { \
drm_ut_debug_printk(DRM_UT_DRIVER, DRM_NAME, \
__func__, fmt, ##args); \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_DEBUG_KMS(fmt, args...) \
#define DRM_DEBUG_PRIME(fmt, ...) \
do { \
drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME, \
__func__, fmt, ##args); \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_LOG(fmt, args...) \
#define DRM_LOG(fmt, ...) \
do { \
drm_ut_debug_printk(DRM_UT_CORE, NULL, \
NULL, fmt, ##args); \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_LOG_KMS(fmt, args...) \
#define DRM_LOG_KMS(fmt, ...) \
do { \
drm_ut_debug_printk(DRM_UT_KMS, NULL, \
NULL, fmt, ##args); \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_LOG_MODE(fmt, args...) \
#define DRM_LOG_MODE(fmt, ...) \
do { \
drm_ut_debug_printk(DRM_UT_MODE, NULL, \
NULL, fmt, ##args); \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_LOG_DRIVER(fmt, args...) \
#define DRM_LOG_DRIVER(fmt, ...) \
do { \
drm_ut_debug_printk(DRM_UT_DRIVER, NULL, \
NULL, fmt, ##args); \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#else
#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
#define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
#define DRM_DEBUG_PRIME(fmt, args...) do { } while (0)
#define DRM_DEBUG(fmt, arg...) do { } while (0)
#define DRM_LOG(fmt, arg...) do { } while (0)
#define DRM_LOG_KMS(fmt, args...) do { } while (0)
293,6 → 285,7
} \
} while (0)
 
#if 0
/**
* Ioctl function type.
*
357,7 → 350,6
struct drm_buf *next; /**< Kernel-only: used for free list */
__volatile__ int waiting; /**< On kernel DMA queue */
__volatile__ int pending; /**< On hardware DMA queue */
wait_queue_head_t dma_wait; /**< Processes waiting */
struct drm_file *file_priv; /**< Private of holding file descr */
int context; /**< Kernel queue for this buffer */
int while_locked; /**< Dispatch this buffer while locked */
429,11 → 421,17
void (*destroy)(struct drm_pending_event *event);
};
 
/* initial implementation using a linked list - todo hashtab */
struct drm_prime_file_private {
struct list_head head;
struct mutex lock;
};
 
/** File private data */
struct drm_file {
int authenticated;
pid_t pid;
uid_t uid;
struct pid *pid;
kuid_t uid;
drm_magic_t magic;
unsigned long ioctl_count;
struct list_head lhead;
456,6 → 454,8
wait_queue_head_t event_wait;
struct list_head event_list;
int event_space;
 
struct drm_prime_file_private prime;
};
 
/** Wait queue */
677,7 → 677,7
void *driver_private;
};
 
#include "drm_crtc.h"
#include <drm/drm_crtc.h>
 
/* per-master structure */
struct drm_master {
758,11 → 758,11
* @dev: DRM device
* @crtc: counter to fetch
*
* Driver callback for fetching a raw hardware vblank counter
* for @crtc. If a device doesn't have a hardware counter, the
* driver can simply return the value of drm_vblank_count and
* make the enable_vblank() and disable_vblank() hooks into no-ops,
* leaving interrupts enabled at all times.
* Driver callback for fetching a raw hardware vblank counter for @crtc.
* If a device doesn't have a hardware counter, the driver can simply
* return the value of drm_vblank_count. The DRM core will account for
* missed vblank events while interrupts where disabled based on system
* timestamps.
*
* Wraparound handling and loss of events due to modesetting is dealt
* with in the DRM core code.
879,12 → 879,6
void (*irq_preinstall) (struct drm_device *dev);
int (*irq_postinstall) (struct drm_device *dev);
void (*irq_uninstall) (struct drm_device *dev);
void (*reclaim_buffers) (struct drm_device *dev,
struct drm_file * file_priv);
void (*reclaim_buffers_locked) (struct drm_device *dev,
struct drm_file *file_priv);
void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
struct drm_file *file_priv);
void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
 
915,6 → 909,20
int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
 
/* prime: */
/* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
uint32_t handle, uint32_t flags, int *prime_fd);
/* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
int prime_fd, uint32_t *handle);
/* export GEM -> dmabuf */
struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
/* import dmabuf -> GEM */
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
struct dma_buf *dma_buf);
 
/* vga arb irq handler */
void (*vgaarb_irq)(struct drm_device *dev, bool state);
 
930,7 → 938,7
uint32_t handle);
 
/* Driver private ops for this object */
struct vm_operations_struct *gem_vm_ops;
const struct vm_operations_struct *gem_vm_ops;
 
int major;
int minor;
1092,12 → 1100,8
 
/*@} */
 
/** \name DMA queues (contexts) */
/** \name DMA support */
/*@{ */
int queue_count; /**< Number of active DMA queues */
int queue_reserved; /**< Number of reserved DMA queues */
int queue_slots; /**< Actual length of queuelist */
// struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */
// struct drm_device_dma *dma; /**< Optional pointer for DMA support */
/*@} */
 
1182,6 → 1186,8
struct idr object_name_idr;
/*@} */
int switch_power_state;
 
atomic_t unplugged; /* device has been unplugged or gone away */
};
 
#define DRM_SWITCH_POWER_ON 0
1272,17 → 1278,12
/* Mapping support (drm_vm.h) */
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
extern void drm_vm_open_locked(struct vm_area_struct *vma);
extern void drm_vm_close_locked(struct vm_area_struct *vma);
extern void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
extern void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
 
/* Memory management support (drm_memory.h) */
#include "drm_memory.h"
extern void drm_mem_init(void);
extern int drm_mem_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
 
#include <drm/drm_memory.h>
extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
1346,6 → 1347,8
 
/* Cache management (drm_cache.c) */
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(char *addr, unsigned long length);
 
/* Locking IOCTL support (drm_lock.h) */
extern int drm_lock(struct drm_device *dev, void *data,
1397,12 → 1400,8
/* IRQ support (drm_irq.h) */
extern int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
extern int drm_irq_install(struct drm_device *dev);
extern int drm_irq_uninstall(struct drm_device *dev);
extern void drm_driver_irq_preinstall(struct drm_device *dev);
extern void drm_driver_irq_postinstall(struct drm_device *dev);
extern void drm_driver_irq_uninstall(struct drm_device *dev);
 
extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
1478,8 → 1477,12
 
extern void drm_put_dev(struct drm_device *dev);
extern int drm_put_minor(struct drm_minor **minor);
extern void drm_unplug_dev(struct drm_device *dev);
#endif
 
extern unsigned int drm_debug;
 
#if 0
extern unsigned int drm_vblank_offdelay;
extern unsigned int drm_timestamp_precision;
 
1510,7 → 1513,6
/* Info file support */
extern int drm_name_info(struct seq_file *m, void *data);
extern int drm_vm_info(struct seq_file *m, void *data);
extern int drm_queues_info(struct seq_file *m, void *data);
extern int drm_bufs_info(struct seq_file *m, void *data);
extern int drm_vblank_info(struct seq_file *m, void *data);
extern int drm_clients_info(struct seq_file *m, void* data);
1533,6 → 1535,7
struct drm_ati_pcigart_info * gart_info);
extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
struct drm_ati_pcigart_info * gart_info);
#endif
 
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align);
1539,6 → 1542,7
extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
 
#if 0
/* sysfs support (drm_sysfs.c) */
struct drm_sysfs_class;
extern struct class *drm_sysfs_create(struct module *owner, char *name);
1680,6 → 1684,7
}
 
 
 
static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
{
if (size * nmemb <= PAGE_SIZE)
1702,7 → 1707,12
 
#endif
 
#define DRM_PCIE_SPEED_25 1
#define DRM_PCIE_SPEED_50 2
#define DRM_PCIE_SPEED_80 4
 
extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
 
static __inline__ int drm_device_is_agp(struct drm_device *dev)
{
return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
1713,4 → 1723,8
return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
}
#endif /* __KERNEL__ */
 
#define drm_sysfs_connector_add(connector)
#define drm_sysfs_connector_remove(connector)
 
#endif
/drivers/include/drm/drm_crtc.h
30,6 → 30,7
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/fb.h>
#include <drm/drm_mode.h>
 
#include <drm/drm_fourcc.h>
 
36,6 → 37,7
struct drm_device;
struct drm_mode_set;
struct drm_framebuffer;
struct drm_object_properties;
 
 
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
50,8 → 52,16
struct drm_mode_object {
uint32_t id;
uint32_t type;
struct drm_object_properties *properties;
};
 
#define DRM_OBJECT_MAX_PROPERTY 24
struct drm_object_properties {
int count;
uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
 
/*
* Note on terminology: here, for brevity and convenience, we refer to connector
* control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
109,7 → 119,8
.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
.vscan = (vs), .flags = (f), .vrefresh = 0
.vscan = (vs), .flags = (f), .vrefresh = 0, \
.base.type = DRM_MODE_OBJECT_MODE
 
#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
 
121,7 → 132,7
char name[DRM_DISPLAY_MODE_LEN];
 
enum drm_mode_status status;
int type;
unsigned int type;
 
/* Proposed mode values */
int clock; /* in kHz */
157,8 → 168,6
int crtc_vsync_start;
int crtc_vsync_end;
int crtc_vtotal;
int crtc_hadjusted;
int crtc_vadjusted;
 
/* Driver private mode info */
int private_size;
207,11 → 216,10
u32 color_formats;
 
u8 cea_rev;
 
char *raw_edid; /* if any */
};
 
struct drm_framebuffer_funcs {
/* note: use drm_framebuffer_remove() */
void (*destroy)(struct drm_framebuffer *framebuffer);
int (*create_handle)(struct drm_framebuffer *fb,
struct drm_file *file_priv,
257,7 → 265,7
struct drm_mode_object base;
struct list_head head;
unsigned int length;
void *data;
unsigned char data[];
};
 
struct drm_property_enum {
285,19 → 293,16
 
/**
* drm_crtc_funcs - control CRTCs for a given device
* @save: save CRTC state
* @restore: restore CRTC state
* @reset: reset CRTC after state has been invalidated (e.g. resume)
* @dpms: control display power levels
* @save: save CRTC state
* @resore: restore CRTC state
* @lock: lock the CRTC
* @unlock: unlock the CRTC
* @shadow_allocate: allocate shadow pixmap
* @shadow_create: create shadow pixmap for rotation support
* @shadow_destroy: free shadow pixmap
* @mode_fixup: fixup proposed mode
* @mode_set: set the desired mode on the CRTC
* @cursor_set: setup the cursor
* @cursor_move: move the cursor
* @gamma_set: specify color ramp for CRTC
* @destroy: deinit and free object.
* @destroy: deinit and free object
* @set_property: called when a property is changed
* @set_config: apply a new CRTC configuration
* @page_flip: initiate a page flip
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
341,6 → 346,9
int (*page_flip)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
 
int (*set_property)(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val);
};
 
/**
351,6 → 359,9
* @enabled: is this CRTC enabled?
* @mode: current mode timings
* @hwmode: mode timings as programmed to hw regs
* @invert_dimensions: for purposes of error checking crtc vs fb sizes,
* invert the width/height of the crtc. This is used if the driver
* is performing 90 or 270 degree rotated scanout
* @x: x position on screen
* @y: y position on screen
* @funcs: CRTC control functions
360,6 → 371,7
* @framedur_ns: precise line timing
* @pixeldur_ns: precise pixel timing
* @helper_private: mid-layer private data
* @properties: property tracking for this CRTC
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
383,6 → 395,8
*/
struct drm_display_mode hwmode;
 
bool invert_dimensions;
 
int x, y;
const struct drm_crtc_funcs *funcs;
 
395,6 → 409,8
 
/* if you are using the helper */
void *helper_private;
 
struct drm_object_properties properties;
};
 
 
404,11 → 420,8
* @save: save connector state
* @restore: restore connector state
* @reset: reset connector after state has been invalidated (e.g. resume)
* @mode_valid: is this mode valid on the given connector?
* @mode_fixup: try to fixup proposed mode for this connector
* @mode_set: set this mode
* @detect: is this connector active?
* @get_modes: get mode list for this connector
* @fill_modes: fill mode list for this connector
* @set_property: property for this connector may need update
* @destroy: make object go away
* @force: notify the driver the connector is forced on
451,7 → 464,6
};
 
#define DRM_CONNECTOR_MAX_UMODES 16
#define DRM_CONNECTOR_MAX_PROPERTY 16
#define DRM_CONNECTOR_LEN 32
#define DRM_CONNECTOR_MAX_ENCODER 3
 
520,8 → 532,7
* @funcs: connector control functions
* @user_modes: user added mode list
* @edid_blob_ptr: DRM property containing EDID if present
* @property_ids: property tracking for this connector
* @property_values: value pointers or data for properties
* @properties: property tracking for this connector
* @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
* @dpms: current dpms state
* @helper_private: mid-layer private data
565,8 → 576,7
 
struct list_head user_modes;
struct drm_property_blob *edid_blob_ptr;
u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
struct drm_object_properties properties;
 
uint8_t polled; /* DRM_CONNECTOR_POLL_* */
 
588,6 → 598,7
int video_latency[2]; /* [0]: progressive, [1]: interlaced */
int audio_latency[2];
int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
unsigned bad_edid_counter;
};
 
/**
595,6 → 606,7
* @update_plane: update the plane configuration
* @disable_plane: shut down the plane
* @destroy: clean up plane resources
* @set_property: called when a property is changed
*/
struct drm_plane_funcs {
int (*update_plane)(struct drm_plane *plane,
605,6 → 617,9
uint32_t src_w, uint32_t src_h);
int (*disable_plane)(struct drm_plane *plane);
void (*destroy)(struct drm_plane *plane);
 
int (*set_property)(struct drm_plane *plane,
struct drm_property *property, uint64_t val);
};
 
/**
622,6 → 637,7
* @enabled: enabled flag
* @funcs: helper functions
* @helper_private: storage for driver layer
* @properties: property tracking for this plane
*/
struct drm_plane {
struct drm_device *dev;
644,6 → 660,8
 
const struct drm_plane_funcs *funcs;
void *helper_private;
 
struct drm_object_properties properties;
};
 
/**
663,8 → 681,6
* This is used to set modes.
*/
struct drm_mode_set {
struct list_head head;
 
struct drm_framebuffer *fb;
struct drm_crtc *crtc;
struct drm_display_mode *mode;
761,7 → 777,7
 
int min_width, min_height;
int max_width, max_height;
struct drm_mode_config_funcs *funcs;
const struct drm_mode_config_funcs *funcs;
resource_size_t fb_base;
 
/* output poll support */
796,6 → 812,9
struct drm_property *scaling_mode_property;
struct drm_property *dithering_mode_property;
struct drm_property *dirty_info_property;
 
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
};
 
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
807,20 → 826,26
#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
 
struct drm_prop_enum_list {
int type;
char *name;
};
 
extern void drm_crtc_init(struct drm_device *dev,
extern int drm_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs);
extern void drm_crtc_cleanup(struct drm_crtc *crtc);
 
extern void drm_connector_init(struct drm_device *dev,
extern int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type);
 
extern void drm_connector_cleanup(struct drm_connector *connector);
/* helper to unplug all connectors from sysfs for device */
extern void drm_connector_unplug_all(struct drm_device *dev);
 
extern void drm_encoder_init(struct drm_device *dev,
extern int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type);
843,11 → 868,13
extern char *drm_get_tv_select_name(int val);
extern void drm_fb_release(struct drm_file *file_priv);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
862,7 → 889,7
/* for use by fb module */
extern int drm_mode_attachmode_crtc(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_display_mode *mode);
const struct drm_display_mode *mode);
extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
 
extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
888,6 → 915,12
extern int drm_connector_property_get_value(struct drm_connector *connector,
struct drm_property *property,
uint64_t *value);
extern int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t val);
extern int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *value);
extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
extern void drm_framebuffer_set_object(struct drm_device *dev,
unsigned long handle);
894,6 → 927,9
extern int drm_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs);
extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
900,10 → 936,24
extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
extern bool drm_crtc_in_use(struct drm_crtc *crtc);
 
extern int drm_connector_attach_property(struct drm_connector *connector,
extern void drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val);
extern void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val);
extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values);
extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
const char *name,
const struct drm_prop_enum_list *props,
int num_values);
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
int num_values);
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max);
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
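A hedged sketch using the property helpers declared above; the "zpos" range property and example_attach_zpos() are hypothetical, not part of the header:

/* Sketch only: create a range property and attach it to a plane's base object. */
static void example_attach_zpos(struct drm_device *dev, struct drm_plane *plane)
{
    struct drm_property *prop;

    prop = drm_property_create_range(dev, 0, "zpos", 0, 255);
    if (!prop)
        return;

    drm_object_attach_property(&plane->base, prop, 0);
}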
919,7 → 969,7
struct drm_encoder *encoder);
extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type);
995,7 → 1045,28
int hdisplay, int vdisplay);
 
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
extern bool drm_edid_is_valid(struct edid *edid);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh);
int hsize, int vsize, int fresh,
bool rb);
 
extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp);
extern int drm_format_num_planes(uint32_t format);
extern int drm_format_plane_cpp(uint32_t format, int plane);
extern int drm_format_horz_chroma_subsampling(uint32_t format);
extern int drm_format_vert_chroma_subsampling(uint32_t format);
 
#endif /* __DRM_CRTC_H__ */
/drivers/include/drm/drm_crtc_helper.h
44,6 → 44,13
ENTER_ATOMIC_MODE_SET,
};
 
/**
* drm_crtc_helper_funcs - helper operations for CRTCs
* @mode_fixup: try to fixup proposed mode for this connector
* @mode_set: set this mode
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_crtc_helper_funcs {
/*
* Control power levels on the CRTC. If the mode passed in is
55,7 → 62,7
 
/* Provider can fixup or change mode timings before modeset occurs */
bool (*mode_fixup)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/* Actually set the mode */
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
76,6 → 83,13
void (*disable)(struct drm_crtc *crtc);
};
 
/**
* drm_encoder_helper_funcs - helper operations for encoders
* @mode_fixup: try to fixup proposed mode for this connector
* @mode_set: set this mode
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_encoder_helper_funcs {
void (*dpms)(struct drm_encoder *encoder, int mode);
void (*save)(struct drm_encoder *encoder);
82,7 → 96,7
void (*restore)(struct drm_encoder *encoder);
 
bool (*mode_fixup)(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*prepare)(struct drm_encoder *encoder);
void (*commit)(struct drm_encoder *encoder);
97,6 → 111,13
void (*disable)(struct drm_encoder *encoder);
};
 
/**
* drm_connector_helper_funcs - helper operations for connectors
* @get_modes: get mode list for this connector
* @mode_valid: is this mode valid on the given connector?
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_connector_helper_funcs {
int (*get_modes)(struct drm_connector *connector);
int (*mode_valid)(struct drm_connector *connector,
145,6 → 166,4
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
 
extern int drm_format_num_planes(uint32_t format);
 
#endif
/drivers/include/drm/drm_dp_helper.h
26,7 → 26,19
#include <linux/types.h>
#include <linux/i2c.h>
 
/* From the VESA DisplayPort spec */
/*
* Unless otherwise noted, all values are from the DP 1.1a spec. Note that
* DP and DPCD versions are independent. Differences from 1.0 are not noted,
* 1.0 devices basically don't exist in the wild.
*
* Abbreviations, in chronological order:
*
* eDP: Embedded DisplayPort version 1
* DPI: DisplayPort Interoperability Guideline v1.1a
* 1.2: DisplayPort 1.2
*
* 1.2 formally includes both eDP and DPI definitions.
*/
 
#define AUX_NATIVE_WRITE 0x8
#define AUX_NATIVE_READ 0x9
53,7 → 65,7
 
#define DP_MAX_LANE_COUNT 0x002
# define DP_MAX_LANE_COUNT_MASK 0x1f
# define DP_TPS3_SUPPORTED (1 << 6)
# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */
# define DP_ENHANCED_FRAME_CAP (1 << 7)
 
#define DP_MAX_DOWNSPREAD 0x003
69,15 → 81,33
/* 10b = TMDS or HDMI */
/* 11b = Other */
# define DP_FORMAT_CONVERSION (1 << 3)
# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
 
#define DP_MAIN_LINK_CHANNEL_CODING 0x006
 
#define DP_EDP_CONFIGURATION_CAP 0x00d
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e
#define DP_DOWN_STREAM_PORT_COUNT 0x007
# define DP_PORT_COUNT_MASK 0x0f
# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
# define DP_OUI_SUPPORT (1 << 7)
 
#define DP_PSR_SUPPORT 0x070
#define DP_I2C_SPEED_CAP 0x00c /* DPI */
# define DP_I2C_SPEED_1K 0x01
# define DP_I2C_SPEED_5K 0x02
# define DP_I2C_SPEED_10K 0x04
# define DP_I2C_SPEED_100K 0x08
# define DP_I2C_SPEED_400K 0x10
# define DP_I2C_SPEED_1M 0x20
 
#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
 
/* Multiple stream transport */
#define DP_MSTM_CAP 0x021 /* 1.2 */
# define DP_MST_CAP (1 << 0)
 
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
#define DP_PSR_CAPS 0x071
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
# define DP_PSR_SETUP_TIME_330 (0 << 1)
# define DP_PSR_SETUP_TIME_275 (1 << 1)
89,11 → 119,36
# define DP_PSR_SETUP_TIME_MASK (7 << 1)
# define DP_PSR_SETUP_TIME_SHIFT 1
 
/*
* 0x80-0x8f describe downstream port capabilities, but there are two layouts
* based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not,
* each port's descriptor is one byte wide. If it was set, each port's is
* four bytes wide, starting with the one byte from the base info. As of
* DP interop v1.1a only VGA defines additional detail.
*/
 
/* offset 0 */
#define DP_DOWNSTREAM_PORT_0 0x80
# define DP_DS_PORT_TYPE_MASK (7 << 0)
# define DP_DS_PORT_TYPE_DP 0
# define DP_DS_PORT_TYPE_VGA 1
# define DP_DS_PORT_TYPE_DVI 2
# define DP_DS_PORT_TYPE_HDMI 3
# define DP_DS_PORT_TYPE_NON_EDID 4
# define DP_DS_PORT_HPD (1 << 3)
/* offset 1 for VGA is maximum megapixels per second / 8 */
/* offset 2 */
# define DP_DS_VGA_MAX_BPC_MASK (3 << 0)
# define DP_DS_VGA_8BPC 0
# define DP_DS_VGA_10BPC 1
# define DP_DS_VGA_12BPC 2
# define DP_DS_VGA_16BPC 3
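A hedged sketch of decoding the first downstream-port capability byte laid out above; example_port_is_hdmi() is hypothetical and only inspects the one-byte descriptor form:

/* Sketch only: classify downstream port 0 from its first capability byte. */
static bool example_port_is_hdmi(u8 port0)
{
    /* Bits 2:0 carry the port type; bit 3 reports HPD capability. */
    return (port0 & DP_DS_PORT_TYPE_MASK) == DP_DS_PORT_TYPE_HDMI;
}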
 
/* link configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
 
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
103,7 → 158,7
# define DP_TRAINING_PATTERN_DISABLE 0
# define DP_TRAINING_PATTERN_1 1
# define DP_TRAINING_PATTERN_2 2
# define DP_TRAINING_PATTERN_3 3
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
# define DP_TRAINING_PATTERN_MASK 0x3
 
# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
144,16 → 199,32
 
#define DP_DOWNSPREAD_CTRL 0x107
# define DP_SPREAD_AMP_0_5 (1 << 4)
# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
 
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
# define DP_SET_ANSI_8B10B (1 << 0)
 
#define DP_PSR_EN_CFG 0x170
#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */
/* bitmask as for DP_I2C_SPEED_CAP */
 
#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
 
#define DP_MSTM_CTRL 0x111 /* 1.2 */
# define DP_MST_EN (1 << 0)
# define DP_UP_REQ_EN (1 << 1)
# define DP_UPSTREAM_IS_SRC (1 << 2)
 
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE (1 << 0)
# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
# define DP_PSR_CRC_VERIFICATION (1 << 2)
# define DP_PSR_FRAME_CAPTURE (1 << 3)
 
#define DP_SINK_COUNT 0x200
/* prior to 1.2 bit 7 was reserved mbz */
# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
# define DP_SINK_CP_READY (1 << 6)
 
#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201
# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
160,8 → 231,6
# define DP_CP_IRQ (1 << 2)
# define DP_SINK_SPECIFIC_IRQ (1 << 6)
 
#define DP_EDP_CONFIGURATION_SET 0x10a
 
#define DP_LANE0_1_STATUS 0x202
#define DP_LANE2_3_STATUS 0x203
# define DP_LANE_CR_DONE (1 << 0)
213,18 → 282,22
# define DP_TEST_NAK (1 << 1)
# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
 
#define DP_SOURCE_OUI 0x300
#define DP_SINK_OUI 0x400
#define DP_BRANCH_OUI 0x500
 
#define DP_SET_POWER 0x600
# define DP_SET_POWER_D0 0x1
# define DP_SET_POWER_D3 0x2
 
#define DP_PSR_ERROR_STATUS 0x2006
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
 
#define DP_PSR_ESI 0x2007
#define DP_PSR_ESI 0x2007 /* XXX 1.2? */
# define DP_PSR_CAPS_CHANGE (1 << 0)
 
#define DP_PSR_STATUS 0x2008
#define DP_PSR_STATUS 0x2008 /* XXX 1.2? */
# define DP_PSR_SINK_INACTIVE 0
# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1
# define DP_PSR_SINK_ACTIVE_RFB 2
/drivers/include/drm/drm_edid.h
90,12 → 90,26
u8 min_hfreq_khz;
u8 max_hfreq_khz;
u8 pixel_clock_mhz; /* need to multiply by 10 */
__le16 sec_gtf_toggle; /* A000=use above, 20=use below */
u8 flags;
union {
struct {
u8 reserved;
u8 hfreq_start_khz; /* need to multiply by 2 */
u8 c; /* need to divide by 2 */
__le16 m;
u8 k;
u8 j; /* need to divide by 2 */
} __attribute__((packed)) gtf2;
struct {
u8 version;
u8 data1; /* high 6 bits: extra clock resolution */
u8 data2; /* plus low 2 of above: max hactive */
u8 supported_aspects;
u8 flags; /* preferred aspect and blanking support */
u8 supported_scalings;
u8 preferred_refresh;
} __attribute__((packed)) cvt;
} formula;
} __attribute__((packed));
 
struct detailed_data_wpindex {
238,5 → 252,6
struct drm_display_mode *mode);
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
int drm_load_edid_firmware(struct drm_connector *connector);
 
#endif /* __DRM_EDID_H__ */
/drivers/include/drm/drm_fb_helper.h
34,7 → 34,6
 
 
struct drm_fb_helper_crtc {
uint32_t crtc_id;
struct drm_mode_set mode_set;
struct drm_display_mode *desired_mode;
};
73,7 → 72,6
int connector_count;
struct drm_fb_helper_connector **connector_info;
struct drm_fb_helper_funcs *funcs;
int conn_limit;
struct fb_info *fbdev;
u32 pseudo_palette[17];
struct list_head kernel_fb_list;
/drivers/include/drm/drm_fixed.h
37,6 → 37,7
#define dfixed_init(A) { .full = dfixed_const((A)) }
#define dfixed_init_half(A) { .full = dfixed_const_half((A)) }
#define dfixed_trunc(A) ((A).full >> 12)
#define dfixed_frac(A) ((A).full & ((1 << 12) - 1))
 
static inline u32 dfixed_floor(fixed20_12 A)
{
/drivers/include/drm/drm_fourcc.h
106,9 → 106,10
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
 
/* 2 non contiguous plane YCbCr */
#define DRM_FORMAT_NV12M fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */
/* special NV12 tiled format */
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
 
/*
131,7 → 132,4
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
 
/* 3 non contiguous plane YCbCr */
#define DRM_FORMAT_YUV420M fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
 
#endif /* DRM_FOURCC_H */
/drivers/include/drm/drm_mm.h
50,6 → 50,7
unsigned scanned_next_free : 1;
unsigned scanned_preceeds_hole : 1;
unsigned allocated : 1;
unsigned long color;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
66,6 → 67,7
spinlock_t unused_lock;
unsigned int scan_check_range : 1;
unsigned scan_alignment;
unsigned long scan_color;
unsigned long scan_size;
unsigned long scan_hit_start;
unsigned scan_hit_size;
73,6 → 75,9
unsigned long scan_start;
unsigned long scan_end;
struct drm_mm_node *prev_scanned_node;
 
void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
unsigned long *start, unsigned long *end);
};
 
static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
100,11 → 105,13
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
unsigned long color,
int atomic);
extern struct drm_mm_node *drm_mm_get_block_range_generic(
struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
int atomic);
112,13 → 119,13
unsigned long size,
unsigned alignment)
{
return drm_mm_get_block_generic(parent, size, alignment, 0);
return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
{
return drm_mm_get_block_generic(parent, size, alignment, 1);
return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
}
static inline struct drm_mm_node *drm_mm_get_block_range(
struct drm_mm_node *parent,
127,7 → 134,18
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment,
return drm_mm_get_block_range_generic(parent, size, alignment, 0,
start, end, 0);
}
static inline struct drm_mm_node *drm_mm_get_color_block_range(
struct drm_mm_node *parent,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment, color,
start, end, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
137,7 → 155,7
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment,
return drm_mm_get_block_range_generic(parent, size, alignment, 0,
start, end, 1);
}
extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
149,18 → 167,59
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern void drm_mm_remove_node(struct drm_mm_node *node);
extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
int best_match);
extern struct drm_mm_node *drm_mm_search_free_in_range(
unsigned long color,
bool best_match);
extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
bool best_match);
static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
bool best_match)
{
return drm_mm_search_free_generic(mm,size, alignment, 0, best_match);
}
static inline struct drm_mm_node *drm_mm_search_free_in_range(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
bool best_match)
{
return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
start, end, best_match);
}
static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
bool best_match)
{
return drm_mm_search_free_generic(mm,size, alignment, color, best_match);
}
static inline struct drm_mm_node *drm_mm_search_free_in_range_color(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
bool best_match)
{
return drm_mm_search_free_in_range_generic(mm, size, alignment, color,
start, end, best_match);
}
extern int drm_mm_init(struct drm_mm *mm,
unsigned long start,
unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
171,10 → 230,14
return block->mm;
}
 
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
unsigned alignment);
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
void drm_mm_init_scan(struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end);
int drm_mm_scan_add_block(struct drm_mm_node *node);
/drivers/include/drm/drm_mode.h
1,444 → 1,463
/*
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
* Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
* Copyright (c) 2008 Red Hat Inc.
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
* Copyright (c) 2007-2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
 
#ifndef _DRM_MODE_H
#define _DRM_MODE_H
 
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
 
#define DRM_MODE_TYPE_BUILTIN (1<<0)
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_PREFERRED (1<<3)
#define DRM_MODE_TYPE_DEFAULT (1<<4)
#define DRM_MODE_TYPE_USERDEF (1<<5)
#define DRM_MODE_TYPE_DRIVER (1<<6)
 
/* Video mode flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
#define DRM_MODE_FLAG_NVSYNC (1<<3)
#define DRM_MODE_FLAG_INTERLACE (1<<4)
#define DRM_MODE_FLAG_DBLSCAN (1<<5)
#define DRM_MODE_FLAG_CSYNC (1<<6)
#define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
#define DRM_MODE_FLAG_BCAST (1<<10)
#define DRM_MODE_FLAG_PIXMUX (1<<11)
#define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
 
/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
#define DRM_MODE_DPMS_STANDBY 1
#define DRM_MODE_DPMS_SUSPEND 2
#define DRM_MODE_DPMS_OFF 3
 
/* Scaling mode options */
#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
software can still scale) */
#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
 
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
#define DRM_MODE_DITHERING_AUTO 2
 
/* Dirty info options */
#define DRM_MODE_DIRTY_OFF 0
#define DRM_MODE_DIRTY_ON 1
#define DRM_MODE_DIRTY_ANNOTATE 2
 
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
 
__u32 vrefresh;
 
__u32 flags;
__u32 type;
char name[DRM_DISPLAY_MODE_LEN];
};
 
struct drm_mode_card_res {
__u64 fb_id_ptr;
__u64 crtc_id_ptr;
__u64 connector_id_ptr;
__u64 encoder_id_ptr;
__u32 count_fbs;
__u32 count_crtcs;
__u32 count_connectors;
__u32 count_encoders;
__u32 min_width, max_width;
__u32 min_height, max_height;
};
 
struct drm_mode_crtc {
__u64 set_connectors_ptr;
__u32 count_connectors;
 
__u32 crtc_id; /**< Id */
__u32 fb_id; /**< Id of framebuffer */
 
__u32 x, y; /**< Position on the framebuffer */
 
__u32 gamma_size;
__u32 mode_valid;
struct drm_mode_modeinfo mode;
};
 
#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
 
/* Planes blend with or override other bits on the CRTC */
struct drm_mode_set_plane {
__u32 plane_id;
__u32 crtc_id;
__u32 fb_id; /* fb object contains surface format type */
__u32 flags; /* see above flags */
 
/* Signed dest location allows it to be partially off screen */
__s32 crtc_x, crtc_y;
__u32 crtc_w, crtc_h;
 
/* Source values are 16.16 fixed point */
__u32 src_x, src_y;
__u32 src_h, src_w;
};
 
struct drm_mode_get_plane {
__u32 plane_id;
 
__u32 crtc_id;
__u32 fb_id;
 
__u32 possible_crtcs;
__u32 gamma_size;
 
__u32 count_format_types;
__u64 format_type_ptr;
};
 
struct drm_mode_get_plane_res {
__u64 plane_id_ptr;
__u32 count_planes;
};
 
#define DRM_MODE_ENCODER_NONE 0
#define DRM_MODE_ENCODER_DAC 1
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
#define DRM_MODE_ENCODER_VIRTUAL 5
 
struct drm_mode_get_encoder {
__u32 encoder_id;
__u32 encoder_type;
 
__u32 crtc_id; /**< Id of crtc */
 
__u32 possible_crtcs;
__u32 possible_clones;
};
 
/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
#define DRM_MODE_SUBCONNECTOR_Automatic 0
#define DRM_MODE_SUBCONNECTOR_Unknown 0
#define DRM_MODE_SUBCONNECTOR_DVID 3
#define DRM_MODE_SUBCONNECTOR_DVIA 4
#define DRM_MODE_SUBCONNECTOR_Composite 5
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
#define DRM_MODE_SUBCONNECTOR_Component 8
#define DRM_MODE_SUBCONNECTOR_SCART 9
 
#define DRM_MODE_CONNECTOR_Unknown 0
#define DRM_MODE_CONNECTOR_VGA 1
#define DRM_MODE_CONNECTOR_DVII 2
#define DRM_MODE_CONNECTOR_DVID 3
#define DRM_MODE_CONNECTOR_DVIA 4
#define DRM_MODE_CONNECTOR_Composite 5
#define DRM_MODE_CONNECTOR_SVIDEO 6
#define DRM_MODE_CONNECTOR_LVDS 7
#define DRM_MODE_CONNECTOR_Component 8
#define DRM_MODE_CONNECTOR_9PinDIN 9
#define DRM_MODE_CONNECTOR_DisplayPort 10
#define DRM_MODE_CONNECTOR_HDMIA 11
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_CONNECTOR_TV 13
#define DRM_MODE_CONNECTOR_eDP 14
#define DRM_MODE_CONNECTOR_VIRTUAL 15
 
struct drm_mode_get_connector {
 
__u64 encoders_ptr;
__u64 modes_ptr;
__u64 props_ptr;
__u64 prop_values_ptr;
 
__u32 count_modes;
__u32 count_props;
__u32 count_encoders;
 
__u32 encoder_id; /**< Current Encoder */
__u32 connector_id; /**< Id */
__u32 connector_type;
__u32 connector_type_id;
 
__u32 connection;
__u32 mm_width, mm_height; /**< HxW in millimeters */
__u32 subpixel;
};
 
#define DRM_MODE_PROP_PENDING (1<<0)
#define DRM_MODE_PROP_RANGE (1<<1)
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
 
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
 
struct drm_mode_get_property {
__u64 values_ptr; /* values and blob lengths */
__u64 enum_blob_ptr; /* enum and blob id ptrs */
 
__u32 prop_id;
__u32 flags;
char name[DRM_PROP_NAME_LEN];
 
__u32 count_values;
__u32 count_enum_blobs;
};
 
struct drm_mode_connector_set_property {
__u64 value;
__u32 prop_id;
__u32 connector_id;
};
 
struct drm_mode_get_blob {
__u32 blob_id;
__u32 length;
__u64 data;
};
 
struct drm_mode_fb_cmd {
__u32 fb_id;
__u32 width, height;
__u32 pitch;
__u32 bpp;
__u32 depth;
/* driver specific handle */
__u32 handle;
};
 
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
 
struct drm_mode_fb_cmd2 {
__u32 fb_id;
__u32 width, height;
__u32 pixel_format; /* fourcc code from drm_fourcc.h */
__u32 flags; /* see above flags */
 
/*
* In case of planar formats, this ioctl allows up to 4
* buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
* e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
*
* YUV 4:2:0 image with a plane of 8 bit Y samples
* followed by an interleaved U/V plane containing
* 8 bit 2x2 subsampled colour difference samples.
*
* So it would consist of Y as offset[0] and UV as
* offset[1]. Note that offset[0] will generally
* be 0.
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
};
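/*
 * Illustrative sketch, not part of the upstream header: how the planar
 * layout described in the comment above maps onto drm_mode_fb_cmd2 for an
 * NV12 buffer. DRM_IOCTL_MODE_ADDFB2 and DRM_FORMAT_NV12 come from drm.h /
 * drm_fourcc.h on Linux and are assumed here; the GEM handle, width and
 * height are caller-supplied placeholders.
 */
#if 0	/* example only */
#include <string.h>
#include <sys/ioctl.h>

static int add_nv12_fb(int drm_fd, __u32 bo_handle, __u32 w, __u32 h, __u32 *fb_id)
{
	struct drm_mode_fb_cmd2 f;

	memset(&f, 0, sizeof(f));
	f.width        = w;
	f.height       = h;
	f.pixel_format = DRM_FORMAT_NV12;
	f.handles[0]   = bo_handle;	/* Y plane */
	f.pitches[0]   = w;		/* 8-bit Y samples, one per pixel */
	f.offsets[0]   = 0;
	f.handles[1]   = bo_handle;	/* interleaved U/V plane, same BO */
	f.pitches[1]   = w;		/* U/V pairs, 2x2 subsampled */
	f.offsets[1]   = w * h;		/* UV starts right after Y (pitch == width assumed) */

	if (ioctl(drm_fd, DRM_IOCTL_MODE_ADDFB2, &f))
		return -1;
	*fb_id = f.fb_id;		/* written back by the kernel */
	return 0;
}
#endif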
 
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
#define DRM_MODE_FB_DIRTY_FLAGS 0x03
 
#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
 
/*
* Mark a region of a framebuffer as dirty.
*
* Some hardware does not automatically update display contents
* as a hardware or software draw to a framebuffer. This ioctl
* allows userspace to tell the kernel and the hardware what
* regions of the framebuffer have changed.
*
* The kernel or hardware is free to update more than just the
* region specified by the clip rects. The kernel or hardware
* may also delay and/or coalesce several calls to dirty into a
* single update.
*
* Userspace may annotate the updates; these annotations are a
* promise made by the caller that the change is either a copy
* of pixels or a fill of a single color in the region specified.
*
* If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
* the number of updated regions is half of the num_clips given,
* where the clip rects are paired in src and dst. The width and
* height of each one of the pairs must match.
*
* If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller
* promises that the region specified by the clip rects is filled
* completely with a single color as given in the color argument.
*/
 
struct drm_mode_fb_dirty_cmd {
__u32 fb_id;
__u32 flags;
__u32 color;
__u32 num_clips;
__u64 clips_ptr;
};
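/*
 * Illustrative sketch, not part of the upstream header: marking one
 * rectangle dirty with the ANNOTATE_FILL promise described above.
 * DRM_IOCTL_MODE_DIRTYFB and struct drm_clip_rect come from drm.h on
 * Linux and are assumed here; fb_id is a framebuffer created earlier.
 */
#if 0	/* example only */
#include <stdint.h>
#include <sys/ioctl.h>

static int fill_region_dirty(int drm_fd, __u32 fb_id,
			     __u16 x1, __u16 y1, __u16 x2, __u16 y2, __u32 argb)
{
	struct drm_clip_rect clip = { .x1 = x1, .y1 = y1, .x2 = x2, .y2 = y2 };
	struct drm_mode_fb_dirty_cmd cmd = {
		.fb_id     = fb_id,
		.flags     = DRM_MODE_FB_DIRTY_ANNOTATE_FILL,
		.color     = argb,		/* promised fill color for the region */
		.num_clips = 1,
		.clips_ptr = (uintptr_t)&clip,
	};

	return ioctl(drm_fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
}
#endif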
 
struct drm_mode_mode_cmd {
__u32 connector_id;
struct drm_mode_modeinfo mode;
};
 
#define DRM_MODE_CURSOR_BO (1<<0)
#define DRM_MODE_CURSOR_MOVE (1<<1)
 
/*
* Depending on the value in flags, different members are used.
*
* CURSOR_BO uses
* crtc
* width
* height
* handle - if 0, turns the cursor off
*
* CURSOR_MOVE uses
* crtc
* x
* y
*/
struct drm_mode_cursor {
__u32 flags;
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
__u32 handle;
};
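/*
 * Illustrative sketch, not part of the upstream header: the two uses of
 * struct drm_mode_cursor described above. DRM_IOCTL_MODE_CURSOR comes from
 * drm.h on Linux and is assumed here; the 64x64 ARGB cursor BO handle is a
 * caller-supplied placeholder (a handle of 0 would turn the cursor off).
 */
#if 0	/* example only */
#include <sys/ioctl.h>

static int show_and_move_cursor(int drm_fd, __u32 crtc_id, __u32 bo_handle)
{
	struct drm_mode_cursor set = {
		.flags   = DRM_MODE_CURSOR_BO,		/* uses crtc, width, height, handle */
		.crtc_id = crtc_id,
		.width   = 64,
		.height  = 64,
		.handle  = bo_handle,
	};
	struct drm_mode_cursor move = {
		.flags   = DRM_MODE_CURSOR_MOVE,	/* uses crtc, x, y */
		.crtc_id = crtc_id,
		.x       = 100,
		.y       = 100,
	};

	if (ioctl(drm_fd, DRM_IOCTL_MODE_CURSOR, &set))
		return -1;
	return ioctl(drm_fd, DRM_IOCTL_MODE_CURSOR, &move);
}
#endif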
 
struct drm_mode_crtc_lut {
__u32 crtc_id;
__u32 gamma_size;
 
/* pointers to arrays */
__u64 red;
__u64 green;
__u64 blue;
};
 
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
 
/*
* Request a page flip on the specified crtc.
*
* This ioctl will ask KMS to schedule a page flip for the specified
* crtc. Once any pending rendering targeting the specified fb (as of
* ioctl time) has completed, the crtc will be reprogrammed to display
* that fb after the next vertical refresh. The ioctl returns
* immediately, but subsequent rendering to the current fb will block
* in the execbuffer ioctl until the page flip happens. If a page
* flip is already pending as the ioctl is called, EBUSY will be
* returned.
*
* The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
* request that drm sends back a vblank event (see drm.h: struct
* drm_event_vblank) when the page flip is done. The user_data field
* passed in with this ioctl will be returned as the user_data field
* in the vblank event struct.
*
* The reserved field must be zero until we figure out something
* clever to use it for.
*/
 
struct drm_mode_crtc_page_flip {
__u32 crtc_id;
__u32 fb_id;
__u32 flags;
__u32 reserved;
__u64 user_data;
};
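/*
 * Illustrative sketch, not part of the upstream header: requesting a page
 * flip with DRM_MODE_PAGE_FLIP_EVENT and picking the user_data back out of
 * the completion event, as described above. DRM_IOCTL_MODE_PAGE_FLIP,
 * DRM_EVENT_FLIP_COMPLETE and struct drm_event_vblank come from drm.h on
 * Linux and are assumed here.
 */
#if 0	/* example only */
#include <unistd.h>
#include <sys/ioctl.h>

static int flip_and_wait(int drm_fd, __u32 crtc_id, __u32 fb_id, __u64 cookie)
{
	struct drm_mode_crtc_page_flip flip = {
		.crtc_id   = crtc_id,
		.fb_id     = fb_id,
		.flags     = DRM_MODE_PAGE_FLIP_EVENT,
		.user_data = cookie,
	};
	char buf[128];

	if (ioctl(drm_fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip))
		return -1;	/* EBUSY if a flip is already pending */

	/* The completion event is read back from the DRM fd. */
	if (read(drm_fd, buf, sizeof(buf)) > 0) {
		struct drm_event_vblank *ev = (struct drm_event_vblank *)buf;
		if (ev->base.type == DRM_EVENT_FLIP_COMPLETE &&
		    ev->user_data == cookie)
			return 0;
	}
	return -1;
}
#endif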
 
/* create a dumb scanout buffer */
struct drm_mode_create_dumb {
uint32_t height;
uint32_t width;
uint32_t bpp;
uint32_t flags;
/* handle, pitch, size will be returned */
uint32_t handle;
uint32_t pitch;
uint64_t size;
};
 
/* set up for mmap of a dumb scanout buffer */
struct drm_mode_map_dumb {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
};
 
struct drm_mode_destroy_dumb {
uint32_t handle;
};
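/*
 * Illustrative sketch, not part of the upstream header: the usual dumb
 * buffer lifecycle - create, ask for the fake mmap offset, map it, and
 * destroy on failure. DRM_IOCTL_MODE_CREATE_DUMB, _MAP_DUMB and
 * _DESTROY_DUMB come from drm.h on Linux and are assumed here.
 */
#if 0	/* example only */
#include <sys/ioctl.h>
#include <sys/mman.h>

static void *map_dumb_fb(int drm_fd, __u32 w, __u32 h,
			 __u32 *handle, __u32 *pitch, __u64 *size)
{
	struct drm_mode_create_dumb create = { .width = w, .height = h, .bpp = 32 };
	struct drm_mode_map_dumb map = { 0 };
	void *ptr;

	if (ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return NULL;

	map.handle = create.handle;
	if (ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		goto err;

	/* map.offset is the fake offset handed to mmap() on the DRM fd. */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   drm_fd, map.offset);
	if (ptr == MAP_FAILED)
		goto err;

	*handle = create.handle;
	*pitch  = create.pitch;
	*size   = create.size;
	return ptr;

err:
	{
		struct drm_mode_destroy_dumb destroy = { .handle = create.handle };
		ioctl(drm_fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
	}
	return NULL;
}
#endif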
 
#endif
/*
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
* Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
* Copyright (c) 2008 Red Hat Inc.
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
* Copyright (c) 2007-2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
 
#ifndef _DRM_MODE_H
#define _DRM_MODE_H
 
#include <linux/types.h>
 
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
 
#define DRM_MODE_TYPE_BUILTIN (1<<0)
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_PREFERRED (1<<3)
#define DRM_MODE_TYPE_DEFAULT (1<<4)
#define DRM_MODE_TYPE_USERDEF (1<<5)
#define DRM_MODE_TYPE_DRIVER (1<<6)
 
/* Video mode flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
#define DRM_MODE_FLAG_NVSYNC (1<<3)
#define DRM_MODE_FLAG_INTERLACE (1<<4)
#define DRM_MODE_FLAG_DBLSCAN (1<<5)
#define DRM_MODE_FLAG_CSYNC (1<<6)
#define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
#define DRM_MODE_FLAG_BCAST (1<<10)
#define DRM_MODE_FLAG_PIXMUX (1<<11)
#define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
 
/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
#define DRM_MODE_DPMS_STANDBY 1
#define DRM_MODE_DPMS_SUSPEND 2
#define DRM_MODE_DPMS_OFF 3
 
/* Scaling mode options */
#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
software can still scale) */
#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
 
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
#define DRM_MODE_DITHERING_AUTO 2
 
/* Dirty info options */
#define DRM_MODE_DIRTY_OFF 0
#define DRM_MODE_DIRTY_ON 1
#define DRM_MODE_DIRTY_ANNOTATE 2
 
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
 
__u32 vrefresh;
 
__u32 flags;
__u32 type;
char name[DRM_DISPLAY_MODE_LEN];
};
 
struct drm_mode_card_res {
__u64 fb_id_ptr;
__u64 crtc_id_ptr;
__u64 connector_id_ptr;
__u64 encoder_id_ptr;
__u32 count_fbs;
__u32 count_crtcs;
__u32 count_connectors;
__u32 count_encoders;
__u32 min_width, max_width;
__u32 min_height, max_height;
};
 
struct drm_mode_crtc {
__u64 set_connectors_ptr;
__u32 count_connectors;
 
__u32 crtc_id; /**< Id */
__u32 fb_id; /**< Id of framebuffer */
 
__u32 x, y; /**< Position on the framebuffer */
 
__u32 gamma_size;
__u32 mode_valid;
struct drm_mode_modeinfo mode;
};
 
#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
 
/* Planes blend with or override other bits on the CRTC */
struct drm_mode_set_plane {
__u32 plane_id;
__u32 crtc_id;
__u32 fb_id; /* fb object contains surface format type */
__u32 flags; /* see above flags */
 
/* Signed dest location allows it to be partially off screen */
__s32 crtc_x, crtc_y;
__u32 crtc_w, crtc_h;
 
/* Source values are 16.16 fixed point */
__u32 src_x, src_y;
__u32 src_h, src_w;
};
 
struct drm_mode_get_plane {
__u32 plane_id;
 
__u32 crtc_id;
__u32 fb_id;
 
__u32 possible_crtcs;
__u32 gamma_size;
 
__u32 count_format_types;
__u64 format_type_ptr;
};
 
struct drm_mode_get_plane_res {
__u64 plane_id_ptr;
__u32 count_planes;
};
 
#define DRM_MODE_ENCODER_NONE 0
#define DRM_MODE_ENCODER_DAC 1
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
#define DRM_MODE_ENCODER_VIRTUAL 5
 
struct drm_mode_get_encoder {
__u32 encoder_id;
__u32 encoder_type;
 
__u32 crtc_id; /**< Id of crtc */
 
__u32 possible_crtcs;
__u32 possible_clones;
};
 
/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
#define DRM_MODE_SUBCONNECTOR_Automatic 0
#define DRM_MODE_SUBCONNECTOR_Unknown 0
#define DRM_MODE_SUBCONNECTOR_DVID 3
#define DRM_MODE_SUBCONNECTOR_DVIA 4
#define DRM_MODE_SUBCONNECTOR_Composite 5
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
#define DRM_MODE_SUBCONNECTOR_Component 8
#define DRM_MODE_SUBCONNECTOR_SCART 9
 
#define DRM_MODE_CONNECTOR_Unknown 0
#define DRM_MODE_CONNECTOR_VGA 1
#define DRM_MODE_CONNECTOR_DVII 2
#define DRM_MODE_CONNECTOR_DVID 3
#define DRM_MODE_CONNECTOR_DVIA 4
#define DRM_MODE_CONNECTOR_Composite 5
#define DRM_MODE_CONNECTOR_SVIDEO 6
#define DRM_MODE_CONNECTOR_LVDS 7
#define DRM_MODE_CONNECTOR_Component 8
#define DRM_MODE_CONNECTOR_9PinDIN 9
#define DRM_MODE_CONNECTOR_DisplayPort 10
#define DRM_MODE_CONNECTOR_HDMIA 11
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_CONNECTOR_TV 13
#define DRM_MODE_CONNECTOR_eDP 14
#define DRM_MODE_CONNECTOR_VIRTUAL 15
 
struct drm_mode_get_connector {
 
__u64 encoders_ptr;
__u64 modes_ptr;
__u64 props_ptr;
__u64 prop_values_ptr;
 
__u32 count_modes;
__u32 count_props;
__u32 count_encoders;
 
__u32 encoder_id; /**< Current Encoder */
__u32 connector_id; /**< Id */
__u32 connector_type;
__u32 connector_type_id;
 
__u32 connection;
__u32 mm_width, mm_height; /**< width x height in millimeters */
__u32 subpixel;
};
 
#define DRM_MODE_PROP_PENDING (1<<0)
#define DRM_MODE_PROP_RANGE (1<<1)
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
 
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
 
struct drm_mode_get_property {
__u64 values_ptr; /* values and blob lengths */
__u64 enum_blob_ptr; /* enum and blob id ptrs */
 
__u32 prop_id;
__u32 flags;
char name[DRM_PROP_NAME_LEN];
 
__u32 count_values;
__u32 count_enum_blobs;
};
 
struct drm_mode_connector_set_property {
__u64 value;
__u32 prop_id;
__u32 connector_id;
};
 
struct drm_mode_obj_get_properties {
__u64 props_ptr;
__u64 prop_values_ptr;
__u32 count_props;
__u32 obj_id;
__u32 obj_type;
};
 
struct drm_mode_obj_set_property {
__u64 value;
__u32 prop_id;
__u32 obj_id;
__u32 obj_type;
};
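/*
 * Illustrative sketch, not part of the upstream header: the usual two-pass
 * pattern for drm_mode_obj_get_properties - the first call reports
 * count_props, the second fills the caller's arrays.
 * DRM_IOCTL_MODE_OBJ_GETPROPERTIES comes from drm.h on Linux and is assumed
 * here; obj_type is one of the DRM_MODE_OBJECT_* constants defined elsewhere
 * in the DRM headers.
 */
#if 0	/* example only */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static int get_obj_properties(int drm_fd, __u32 obj_id, __u32 obj_type,
			      __u32 **ids, __u64 **values, __u32 *count)
{
	struct drm_mode_obj_get_properties arg = {
		.obj_id = obj_id, .obj_type = obj_type,
	};

	if (ioctl(drm_fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg))
		return -1;

	*count  = arg.count_props;
	*ids    = calloc(arg.count_props, sizeof(__u32));
	*values = calloc(arg.count_props, sizeof(__u64));
	arg.props_ptr       = (uintptr_t)*ids;
	arg.prop_values_ptr = (uintptr_t)*values;

	return ioctl(drm_fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg);
}
#endif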
 
struct drm_mode_get_blob {
__u32 blob_id;
__u32 length;
__u64 data;
};
 
struct drm_mode_fb_cmd {
__u32 fb_id;
__u32 width, height;
__u32 pitch;
__u32 bpp;
__u32 depth;
/* driver specific handle */
__u32 handle;
};
 
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
 
struct drm_mode_fb_cmd2 {
__u32 fb_id;
__u32 width, height;
__u32 pixel_format; /* fourcc code from drm_fourcc.h */
__u32 flags; /* see above flags */
 
/*
* In case of planar formats, this ioctl allows up to 4
* buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
* e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
*
* YUV 4:2:0 image with a plane of 8 bit Y samples
* followed by an interleaved U/V plane containing
* 8 bit 2x2 subsampled colour difference samples.
*
* So it would consist of Y as offset[0] and UV as
* offset[1]. Note that offset[0] will generally
* be 0.
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
};
 
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
#define DRM_MODE_FB_DIRTY_FLAGS 0x03
 
#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
 
/*
* Mark a region of a framebuffer as dirty.
*
* Some hardware does not automatically update display contents
* as a hardware or software draw to a framebuffer. This ioctl
* allows userspace to tell the kernel and the hardware what
* regions of the framebuffer have changed.
*
* The kernel or hardware is free to update more than just the
* region specified by the clip rects. The kernel or hardware
* may also delay and/or coalesce several calls to dirty into a
* single update.
*
* Userspace may annotate the updates; these annotations are a
* promise made by the caller that the change is either a copy
* of pixels or a fill of a single color in the region specified.
*
* If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
* the number of updated regions is half of the num_clips given,
* where the clip rects are paired in src and dst. The width and
* height of each one of the pairs must match.
*
* If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller
* promises that the region specified by the clip rects is filled
* completely with a single color as given in the color argument.
*/
 
struct drm_mode_fb_dirty_cmd {
__u32 fb_id;
__u32 flags;
__u32 color;
__u32 num_clips;
__u64 clips_ptr;
};
 
struct drm_mode_mode_cmd {
__u32 connector_id;
struct drm_mode_modeinfo mode;
};
 
#define DRM_MODE_CURSOR_BO 0x01
#define DRM_MODE_CURSOR_MOVE 0x02
#define DRM_MODE_CURSOR_FLAGS 0x03
 
/*
* Depending on the value in flags, different members are used.
*
* CURSOR_BO uses
* crtc
* width
* height
* handle - if 0, turns the cursor off
*
* CURSOR_MOVE uses
* crtc
* x
* y
*/
struct drm_mode_cursor {
__u32 flags;
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
__u32 handle;
};
 
struct drm_mode_crtc_lut {
__u32 crtc_id;
__u32 gamma_size;
 
/* pointers to arrays */
__u64 red;
__u64 green;
__u64 blue;
};
 
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
 
/*
* Request a page flip on the specified crtc.
*
* This ioctl will ask KMS to schedule a page flip for the specified
* crtc. Once any pending rendering targeting the specified fb (as of
* ioctl time) has completed, the crtc will be reprogrammed to display
* that fb after the next vertical refresh. The ioctl returns
* immediately, but subsequent rendering to the current fb will block
* in the execbuffer ioctl until the page flip happens. If a page
* flip is already pending as the ioctl is called, EBUSY will be
* returned.
*
* The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
* request that drm sends back a vblank event (see drm.h: struct
* drm_event_vblank) when the page flip is done. The user_data field
* passed in with this ioctl will be returned as the user_data field
* in the vblank event struct.
*
* The reserved field must be zero until we figure out something
* clever to use it for.
*/
 
struct drm_mode_crtc_page_flip {
__u32 crtc_id;
__u32 fb_id;
__u32 flags;
__u32 reserved;
__u64 user_data;
};
 
/* create a dumb scanout buffer */
struct drm_mode_create_dumb {
uint32_t height;
uint32_t width;
uint32_t bpp;
uint32_t flags;
/* handle, pitch, size will be returned */
uint32_t handle;
uint32_t pitch;
uint64_t size;
};
 
/* set up for mmap of a dumb scanout buffer */
struct drm_mode_map_dumb {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
};
 
struct drm_mode_destroy_dumb {
uint32_t handle;
};
 
#endif
/drivers/include/drm/drm_pciids.h
1,7 → 1,3
/*
This file is auto-generated from the drm_pciids.txt in the DRM CVS
Please contact dri-devel@lists.sf.net to add new cards to this list
*/
#define radeon_PCI_IDS \
{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
181,6 → 177,7
{0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
198,9 → 195,64
{0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x684C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
242,6 → 294,7
{0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
483,7 → 536,10
{0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
498,6 → 554,33
{0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9904, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9905, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9906, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
 
 
/drivers/include/drm/i915_drm.h
0,0 → 1,953
/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
 
#ifndef _UAPI_I915_DRM_H_
#define _UAPI_I915_DRM_H_
 
#include <drm/drm.h>
 
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
 
/* For use by IPS driver */
extern unsigned long i915_read_mch_val(void);
extern bool i915_gpu_raise(void);
extern bool i915_gpu_lower(void);
extern bool i915_gpu_busy(void);
extern bool i915_gpu_turbo_disable(void);
 
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
* of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14
 
typedef struct _drm_i915_init {
enum {
I915_INIT_DMA = 0x01,
I915_CLEANUP_DMA = 0x02,
I915_RESUME_DMA = 0x03
} func;
unsigned int mmio_offset;
int sarea_priv_offset;
unsigned int ring_start;
unsigned int ring_end;
unsigned int ring_size;
unsigned int front_offset;
unsigned int back_offset;
unsigned int depth_offset;
unsigned int w;
unsigned int h;
unsigned int pitch;
unsigned int pitch_bits;
unsigned int back_pitch;
unsigned int depth_pitch;
unsigned int cpp;
unsigned int chipset;
} drm_i915_init_t;
 
typedef struct _drm_i915_sarea {
struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
int last_upload; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int ctxOwner; /* last context to upload state */
int texAge;
int pf_enabled; /* is pageflipping allowed? */
int pf_active;
int pf_current_page; /* which buffer is being displayed? */
int perf_boxes; /* performance boxes to be displayed */
int width, height; /* screen size in pixels */
 
drm_handle_t front_handle;
int front_offset;
int front_size;
 
drm_handle_t back_handle;
int back_offset;
int back_size;
 
drm_handle_t depth_handle;
int depth_offset;
int depth_size;
 
drm_handle_t tex_handle;
int tex_offset;
int tex_size;
int log_tex_granularity;
int pitch;
int rotation; /* 0, 90, 180 or 270 */
int rotated_offset;
int rotated_size;
int rotated_pitch;
int virtualX, virtualY;
 
unsigned int front_tiled;
unsigned int back_tiled;
unsigned int depth_tiled;
unsigned int rotated_tiled;
unsigned int rotated2_tiled;
 
int pipeA_x;
int pipeA_y;
int pipeA_w;
int pipeA_h;
int pipeB_x;
int pipeB_y;
int pipeB_w;
int pipeB_h;
 
/* fill out some space for old userspace triple buffer */
drm_handle_t unused_handle;
__u32 unused1, unused2, unused3;
 
/* buffer object handles for static buffers. May change
* over the lifetime of the client.
*/
__u32 front_bo_handle;
__u32 back_bo_handle;
__u32 unused_bo_handle;
__u32 depth_bo_handle;
 
} drm_i915_sarea_t;
 
/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h
 
/* Flags for perf_boxes
*/
#define I915_BOX_RING_EMPTY 0x1
#define I915_BOX_FLIP 0x2
#define I915_BOX_WAIT 0x4
#define I915_BOX_TEXTURE_LOAD 0x8
#define I915_BOX_LOST_CONTEXT 0x10
 
/* I915 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_I915_INIT 0x00
#define DRM_I915_FLUSH 0x01
#define DRM_I915_FLIP 0x02
#define DRM_I915_BATCHBUFFER 0x03
#define DRM_I915_IRQ_EMIT 0x04
#define DRM_I915_IRQ_WAIT 0x05
#define DRM_I915_GETPARAM 0x06
#define DRM_I915_SETPARAM 0x07
#define DRM_I915_ALLOC 0x08
#define DRM_I915_FREE 0x09
#define DRM_I915_INIT_HEAP 0x0a
#define DRM_I915_CMDBUFFER 0x0b
#define DRM_I915_DESTROY_HEAP 0x0c
#define DRM_I915_SET_VBLANK_PIPE 0x0d
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_GEM_INIT 0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN 0x15
#define DRM_I915_GEM_UNPIN 0x16
#define DRM_I915_GEM_BUSY 0x17
#define DRM_I915_GEM_THROTTLE 0x18
#define DRM_I915_GEM_ENTERVT 0x19
#define DRM_I915_GEM_LEAVEVT 0x1a
#define DRM_I915_GEM_CREATE 0x1b
#define DRM_I915_GEM_PREAD 0x1c
#define DRM_I915_GEM_PWRITE 0x1d
#define DRM_I915_GEM_MMAP 0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_I915_GEM_SW_FINISH 0x20
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
#define DRM_I915_GEM_MADVISE 0x26
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_I915_GEM_EXECBUFFER2 0x29
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
#define DRM_I915_GEM_WAIT 0x2c
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
#define DRM_I915_GEM_SET_CACHING 0x2f
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31
 
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
 
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
*/
typedef struct drm_i915_batchbuffer {
int start; /* agp offset */
int used; /* nr bytes in use */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
 
/* As above, but pass a pointer to userspace buffer which can be
* validated by the kernel prior to sending to hardware.
*/
typedef struct _drm_i915_cmdbuffer {
char __user *buf; /* pointer to userspace command buffer */
int sz; /* nr bytes in buf */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;
 
/* Userspace can request & wait on irq's:
*/
typedef struct drm_i915_irq_emit {
int __user *irq_seq;
} drm_i915_irq_emit_t;
 
typedef struct drm_i915_irq_wait {
int irq_seq;
} drm_i915_irq_wait_t;
 
/* Ioctl to query kernel params:
*/
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_HAS_PAGEFLIPPING 8
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
#define I915_PARAM_HAS_BLT 11
#define I915_PARAM_HAS_RELAXED_FENCING 12
#define I915_PARAM_HAS_COHERENT_RINGS 13
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
#define I915_PARAM_HAS_WAIT_TIMEOUT 19
#define I915_PARAM_HAS_SEMAPHORES 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
 
typedef struct drm_i915_getparam {
int param;
int __user *value;
} drm_i915_getparam_t;
 
/* Ioctl to set kernel params:
*/
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
#define I915_SETPARAM_NUM_USED_FENCES 4
 
typedef struct drm_i915_setparam {
int param;
int value;
} drm_i915_setparam_t;
 
/* A memory manager for regions of shared memory:
*/
#define I915_MEM_REGION_AGP 1
 
typedef struct drm_i915_mem_alloc {
int region;
int alignment;
int size;
int __user *region_offset; /* offset from start of fb or agp */
} drm_i915_mem_alloc_t;
 
typedef struct drm_i915_mem_free {
int region;
int region_offset;
} drm_i915_mem_free_t;
 
typedef struct drm_i915_mem_init_heap {
int region;
int size;
int start;
} drm_i915_mem_init_heap_t;
 
/* Allow memory manager to be torn down and re-initialized (eg on
* rotate):
*/
typedef struct drm_i915_mem_destroy_heap {
int region;
} drm_i915_mem_destroy_heap_t;
 
/* Allow X server to configure which pipes to monitor for vblank signals
*/
#define DRM_I915_VBLANK_PIPE_A 1
#define DRM_I915_VBLANK_PIPE_B 2
 
typedef struct drm_i915_vblank_pipe {
int pipe;
} drm_i915_vblank_pipe_t;
 
/* Schedule buffer swap at given vertical blank:
*/
typedef struct drm_i915_vblank_swap {
drm_drawable_t drawable;
enum drm_vblank_seq_type seqtype;
unsigned int sequence;
} drm_i915_vblank_swap_t;
 
typedef struct drm_i915_hws_addr {
__u64 addr;
} drm_i915_hws_addr_t;
 
struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the DRM memory
* manager.
*/
__u64 gtt_start;
/**
* Ending offset in the GTT to be managed by the DRM memory
* manager.
*/
__u64 gtt_end;
};
 
struct drm_i915_gem_create {
/**
* Requested size for the object.
*
* The (page-aligned) allocated size for the object will be returned.
*/
__u64 size;
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
__u32 pad;
};
 
struct drm_i915_gem_pread {
/** Handle for the object being read. */
__u32 handle;
__u32 pad;
/** Offset into the object to read from */
__u64 offset;
/** Length of data to read */
__u64 size;
/**
* Pointer to write the data into.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 data_ptr;
};
 
struct drm_i915_gem_pwrite {
/** Handle for the object being written to. */
__u32 handle;
__u32 pad;
/** Offset into the object to write to */
__u64 offset;
/** Length of data to write */
__u64 size;
/**
* Pointer to read the data from.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 data_ptr;
};
 
struct drm_i915_gem_mmap {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/** Offset in the object to map. */
__u64 offset;
/**
* Length of data to map.
*
* The value will be page-aligned.
*/
__u64 size;
/**
* Returned pointer the data was mapped at.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 addr_ptr;
};
 
struct drm_i915_gem_mmap_gtt {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
};
 
struct drm_i915_gem_set_domain {
/** Handle for the object */
__u32 handle;
 
/** New read domains */
__u32 read_domains;
 
/** New write domain */
__u32 write_domain;
};
 
struct drm_i915_gem_sw_finish {
/** Handle for the object */
__u32 handle;
};
 
struct drm_i915_gem_relocation_entry {
/**
* Handle of the buffer being pointed to by this relocation entry.
*
* It's appealing to make this be an index into the mm_validate_entry
* list to refer to the buffer, but this allows the driver to create
* a relocation list for state buffers and not re-write it per
* exec using the buffer.
*/
__u32 target_handle;
 
/**
* Value to be added to the offset of the target buffer to make up
* the relocation entry.
*/
__u32 delta;
 
/** Offset in the buffer the relocation entry will be written into */
__u64 offset;
 
/**
* Offset value of the target buffer that the relocation entry was last
* written as.
*
* If the buffer has the same offset as last time, we can skip syncing
* and writing the relocation. This value is written back out by
* the execbuffer ioctl when the relocation is written.
*/
__u64 presumed_offset;
 
/**
* Target memory domains read by this operation.
*/
__u32 read_domains;
 
/**
* Target memory domains written by this operation.
*
* Note that only one domain may be written by the whole
* execbuffer operation, so that where there are conflicts,
* the application will get -EINVAL back.
*/
__u32 write_domain;
};
 
/** @{
* Intel memory domains
*
* Most of these just align with the various caches in
* the system and are used to flush and invalidate as
* objects end up cached in different domains.
*/
/** CPU cache */
#define I915_GEM_DOMAIN_CPU 0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER 0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER 0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND 0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX 0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT 0x00000040
/** @} */
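/*
 * Illustrative sketch, not part of the upstream header: using the domain
 * bits above with DRM_IOCTL_I915_GEM_SET_DOMAIN (defined earlier in this
 * file) to pull an object into the CPU domain before the CPU writes it.
 * The DRM fd and object handle are caller-supplied placeholders.
 */
#if 0	/* example only */
#include <sys/ioctl.h>

static int move_to_cpu_domain(int drm_fd, __u32 handle)
{
	struct drm_i915_gem_set_domain sd = {
		.handle       = handle,
		.read_domains = I915_GEM_DOMAIN_CPU,
		.write_domain = I915_GEM_DOMAIN_CPU,	/* only one write domain per op */
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
}
#endif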
 
struct drm_i915_gem_exec_object {
/**
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
__u32 handle;
 
/** Number of relocations to be performed on this buffer */
__u32 relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
__u64 relocs_ptr;
 
/** Required alignment in graphics aperture */
__u64 alignment;
 
/**
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
__u64 offset;
};
 
struct drm_i915_gem_execbuffer {
/**
* List of buffers to be validated with their relocations to be
* performed on them.
*
* This is a pointer to an array of struct drm_i915_gem_validate_entry.
*
* These buffers must be listed in an order such that all relocations
* a buffer is performing refer to buffers that have already appeared
* in the validate list.
*/
__u64 buffers_ptr;
__u32 buffer_count;
 
/** Offset in the batchbuffer to start execution from. */
__u32 batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
__u32 batch_len;
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr;
};
 
struct drm_i915_gem_exec_object2 {
/**
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
__u32 handle;
 
/** Number of relocations to be performed on this buffer */
__u32 relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
__u64 relocs_ptr;
 
/** Required alignment in graphics aperture */
__u64 alignment;
 
/**
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
__u64 offset;
 
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
__u64 flags;
__u64 rsvd1;
__u64 rsvd2;
};
 
struct drm_i915_gem_execbuffer2 {
/**
* List of gem_exec_object2 structs
*/
__u64 buffers_ptr;
__u32 buffer_count;
 
/** Offset in the batchbuffer to start execution from. */
__u32 batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
__u32 batch_len;
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr;
#define I915_EXEC_RING_MASK (7<<0)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
#define I915_EXEC_BLT (3<<0)
 
/* Used for switching the constants addressing mode on gen4+ RENDER ring.
* Gen6+ only supports relative addressing to dynamic state (default) and
* absolute addressing.
*
* These flags are ignored for the BSD and BLT rings.
*/
#define I915_EXEC_CONSTANTS_MASK (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
__u64 flags;
__u64 rsvd1; /* now used for context info */
__u64 rsvd2;
};
 
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
 
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
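/*
 * Illustrative sketch, not part of the upstream header: packing a context
 * id into rsvd1 with the helper macro above before submitting an
 * execbuffer2. The exec object array and batch length are caller-supplied
 * placeholders; DRM_IOCTL_I915_GEM_EXECBUFFER2 is defined earlier in this file.
 */
#if 0	/* example only */
#include <stdint.h>
#include <sys/ioctl.h>

static int submit_with_context(int drm_fd, __u32 ctx_id,
			       struct drm_i915_gem_exec_object2 *objs,
			       __u32 nobjs, __u32 batch_len)
{
	struct drm_i915_gem_execbuffer2 eb = {
		.buffers_ptr  = (uintptr_t)objs,
		.buffer_count = nobjs,
		.batch_len    = batch_len,
		.flags        = I915_EXEC_RENDER,
	};

	i915_execbuffer2_set_context_id(eb, ctx_id);	/* stores ctx_id in rsvd1 */

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
}
#endif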
 
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
__u32 handle;
__u32 pad;
 
/** alignment required within the aperture */
__u64 alignment;
 
/** Returned GTT offset of the buffer. */
__u64 offset;
};
 
struct drm_i915_gem_unpin {
/** Handle of the buffer to be unpinned. */
__u32 handle;
__u32 pad;
};
 
struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
__u32 handle;
 
/** Return busy status (1 if busy, 0 if idle).
* The high word is used to indicate on which rings the object
* currently resides:
* 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
*/
__u32 busy;
};
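/*
 * Illustrative sketch, not part of the upstream header: decoding the packed
 * busy word described above. Bit 0 is the overall busy flag; bits 16+ say
 * which rings currently hold the object (16 render, 17 bsd, 18 blt).
 * DRM_IOCTL_I915_GEM_BUSY is defined earlier in this file.
 */
#if 0	/* example only */
#include <sys/ioctl.h>

static int query_busy(int drm_fd, __u32 handle,
		      int *on_render, int *on_bsd, int *on_blt)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
		return -1;

	*on_render = !!(busy.busy & (1u << 16));
	*on_bsd    = !!(busy.busy & (1u << 17));
	*on_blt    = !!(busy.busy & (1u << 18));
	return busy.busy & 1;	/* 1 if busy, 0 if idle */
}
#endif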
 
#define I915_CACHING_NONE 0
#define I915_CACHING_CACHED 1
 
struct drm_i915_gem_caching {
/**
* Handle of the buffer to set/get the caching level of. */
__u32 handle;
 
/**
* Caching level to apply or return value
*
* bits0-15 are for generic caching control (i.e. the above defined
* values). bits16-31 are reserved for platform-specific variations
* (e.g. l3$ caching on gen7). */
__u32 caching;
};
 
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
 
#define I915_BIT_6_SWIZZLE_NONE 0
#define I915_BIT_6_SWIZZLE_9 1
#define I915_BIT_6_SWIZZLE_9_10 2
#define I915_BIT_6_SWIZZLE_9_11 3
#define I915_BIT_6_SWIZZLE_9_10_11 4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN 5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17 6
#define I915_BIT_6_SWIZZLE_9_10_17 7
 
struct drm_i915_gem_set_tiling {
/** Handle of the buffer to have its tiling state updated */
__u32 handle;
 
/**
* Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*
* This value is to be set on request, and will be updated by the
* kernel on successful return with the actual chosen tiling layout.
*
* The tiling mode may be demoted to I915_TILING_NONE when the system
* has bit 6 swizzling that can't be managed correctly by GEM.
*
* Buffer contents become undefined when changing tiling_mode.
*/
__u32 tiling_mode;
 
/**
* Stride in bytes for the object when in I915_TILING_X or
* I915_TILING_Y.
*/
__u32 stride;
 
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
__u32 swizzle_mode;
};
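/*
 * Illustrative sketch, not part of the upstream header: requesting X tiling
 * and checking what the kernel actually chose, since the mode may be demoted
 * to I915_TILING_NONE as described above. DRM_IOCTL_I915_GEM_SET_TILING is
 * defined earlier in this file; the stride is a caller-supplied placeholder
 * that must match the chosen tiling.
 */
#if 0	/* example only */
#include <sys/ioctl.h>

static int request_x_tiling(int drm_fd, __u32 handle, __u32 stride)
{
	struct drm_i915_gem_set_tiling st = {
		.handle      = handle,
		.tiling_mode = I915_TILING_X,
		.stride      = stride,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_TILING, &st))
		return -1;

	/* The kernel writes back the tiling and swizzle mode it actually applied. */
	return st.tiling_mode == I915_TILING_X ? 0 : 1;
}
#endif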
 
struct drm_i915_gem_get_tiling {
/** Handle of the buffer to get tiling state for. */
__u32 handle;
 
/**
* Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*/
__u32 tiling_mode;
 
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
__u32 swizzle_mode;
};
 
struct drm_i915_gem_get_aperture {
/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
__u64 aper_size;
 
/**
* Available space in the aperture used by i915_gem_execbuffer, in
* bytes
*/
__u64 aper_available_size;
};
 
struct drm_i915_get_pipe_from_crtc_id {
/** ID of CRTC being requested **/
__u32 crtc_id;
 
/** pipe of requested CRTC **/
__u32 pipe;
};
 
#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */
 
struct drm_i915_gem_madvise {
/** Handle of the buffer to change the backing store advice */
__u32 handle;
 
/* Advice: either the buffer will be needed again in the near future,
* or won't be and could be discarded under memory pressure.
*/
__u32 madv;
 
/** Whether the backing store still exists. */
__u32 retained;
};
 
/* flags */
#define I915_OVERLAY_TYPE_MASK 0xff
#define I915_OVERLAY_YUV_PLANAR 0x01
#define I915_OVERLAY_YUV_PACKED 0x02
#define I915_OVERLAY_RGB 0x03
 
#define I915_OVERLAY_DEPTH_MASK 0xff00
#define I915_OVERLAY_RGB24 0x1000
#define I915_OVERLAY_RGB16 0x2000
#define I915_OVERLAY_RGB15 0x3000
#define I915_OVERLAY_YUV422 0x0100
#define I915_OVERLAY_YUV411 0x0200
#define I915_OVERLAY_YUV420 0x0300
#define I915_OVERLAY_YUV410 0x0400
 
#define I915_OVERLAY_SWAP_MASK 0xff0000
#define I915_OVERLAY_NO_SWAP 0x000000
#define I915_OVERLAY_UV_SWAP 0x010000
#define I915_OVERLAY_Y_SWAP 0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
 
#define I915_OVERLAY_FLAGS_MASK 0xff000000
#define I915_OVERLAY_ENABLE 0x01000000
 
struct drm_intel_overlay_put_image {
/* various flags and src format description */
__u32 flags;
/* source picture description */
__u32 bo_handle;
/* stride values and offsets are in bytes, buffer relative */
__u16 stride_Y; /* stride for packed formats */
__u16 stride_UV;
__u32 offset_Y; /* offset for packed formats */
__u32 offset_U;
__u32 offset_V;
/* in pixels */
__u16 src_width;
__u16 src_height;
/* to compensate for the scaling factors of partially covered surfaces */
__u16 src_scan_width;
__u16 src_scan_height;
/* output crtc description */
__u32 crtc_id;
__u16 dst_x;
__u16 dst_y;
__u16 dst_width;
__u16 dst_height;
};
 
/* flags */
#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
struct drm_intel_overlay_attrs {
__u32 flags;
__u32 color_key;
__s32 brightness;
__u32 contrast;
__u32 saturation;
__u32 gamma0;
__u32 gamma1;
__u32 gamma2;
__u32 gamma3;
__u32 gamma4;
__u32 gamma5;
};
 
/*
* Intel sprite handling
*
* Color keying works with a min/mask/max tuple. Both source and destination
* color keying is allowed.
*
* Source keying:
* Sprite pixels within the min & max values, masked against the color channels
* specified in the mask field, will be transparent. All other pixels will
* be displayed on top of the primary plane. For RGB surfaces, only the min
* and mask fields will be used; ranged compares are not allowed.
*
* Destination keying:
* Primary plane pixels that match the min value, masked against the color
* channels specified in the mask field, will be replaced by corresponding
* pixels from the sprite plane.
*
* Note that source & destination keying are exclusive; only one can be
* active on a given plane.
*/
 
#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
#define I915_SET_COLORKEY_DESTINATION (1<<1)
#define I915_SET_COLORKEY_SOURCE (1<<2)
struct drm_intel_sprite_colorkey {
__u32 plane_id;
__u32 min_value;
__u32 channel_mask;
__u32 max_value;
__u32 flags;
};
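/*
 * Illustrative sketch, not part of the upstream header: enabling source
 * keying as described above - sprite pixels matching min_value under
 * channel_mask become transparent; for RGB surfaces only min and mask are
 * used. DRM_IOCTL_I915_SET_SPRITE_COLORKEY is defined earlier in this file.
 */
#if 0	/* example only */
#include <sys/ioctl.h>

static int set_source_colorkey(int drm_fd, __u32 plane_id, __u32 key_rgb)
{
	struct drm_intel_sprite_colorkey ck = {
		.plane_id     = plane_id,
		.min_value    = key_rgb,
		.channel_mask = 0x00ffffff,	/* compare all RGB channels */
		.flags        = I915_SET_COLORKEY_SOURCE,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &ck);
}
#endif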
 
struct drm_i915_gem_wait {
/** Handle of BO we shall wait on */
__u32 bo_handle;
__u32 flags;
/** Number of nanoseconds to wait. Returns the time remaining. */
__s64 timeout_ns;
};
 
struct drm_i915_gem_context_create {
/* output: id of new context */
__u32 ctx_id;
__u32 pad;
};
 
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
__u32 pad;
};
 
struct drm_i915_reg_read {
__u64 offset;
__u64 val; /* Return value */
};
#endif /* _UAPI_I915_DRM_H_ */
/drivers/include/drm/intel-gtt.h
1,43 → 1,54
/* Common header for intel-gtt.ko and i915.ko */
 
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
 
const struct intel_gtt {
/* Size of memory reserved for graphics by the BIOS */
unsigned int stolen_size;
/* Total number of gtt entries. */
unsigned int gtt_total_entries;
/* Part of the gtt that is mappable by the cpu, for those chips where
* this is not the full gtt. */
unsigned int gtt_mappable_entries;
/* Whether i915 needs to use the dmar apis or not. */
unsigned int needs_dmar : 1;
/* Whether we idle the gpu before mapping/unmapping */
unsigned int do_idle_maps : 1;
} *intel_gtt_get(void);
 
void intel_gtt_chipset_flush(void);
void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
struct scatterlist **sg_list, int *num_sg);
void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
unsigned int sg_len,
unsigned int pg_start,
unsigned int flags);
void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
struct page **pages, unsigned int flags);
 
/* Special gtt memory types */
#define AGP_DCACHE_MEMORY 1
#define AGP_PHYS_MEMORY 2
 
/* New caching attributes for gen6/sandybridge */
#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
 
/* flag for GFDT type */
#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
 
#endif
/* Common header for intel-gtt.ko and i915.ko */
 
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
 
struct agp_bridge_data;
 
const struct intel_gtt {
/* Size of memory reserved for graphics by the BIOS */
unsigned int stolen_size;
/* Total number of gtt entries. */
unsigned int gtt_total_entries;
/* Part of the gtt that is mappable by the cpu, for those chips where
* this is not the full gtt. */
unsigned int gtt_mappable_entries;
/* Whether i915 needs to use the dmar apis or not. */
unsigned int needs_dmar : 1;
/* Whether we idle the gpu before mapping/unmapping */
unsigned int do_idle_maps : 1;
/* Share the scratch page dma with ppgtts. */
dma_addr_t scratch_page_dma;
/* for ppgtt PDE access */
u32 __iomem *gtt;
/* needed for ioremap in drm/i915 */
phys_addr_t gma_bus_addr;
} *intel_gtt_get(void);
 
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
void intel_gmch_remove(void);
 
bool intel_enable_gtt(void);
 
void intel_gtt_chipset_flush(void);
void intel_gtt_insert_sg_entries(struct pagelist *st, unsigned int pg_start,
unsigned int flags);
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
 
/* Special gtt memory types */
#define AGP_DCACHE_MEMORY 1
#define AGP_PHYS_MEMORY 2
 
/* New caching attributes for gen6/sandybridge */
#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
 
/* flag for GFDT type */
#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
 
#ifdef CONFIG_INTEL_IOMMU
extern int intel_iommu_gfx_mapped;
#endif
 
#endif
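 
/*
 * A minimal sketch of consuming the structure returned by
 * intel_gtt_get(): the pointer refers to a single driver-owned
 * instance, so callers only read its fields. The function below is a
 * hypothetical illustration, not part of this header.
 */
static inline unsigned int intel_gtt_mappable_pages_example(void)
{
	const struct intel_gtt *gtt = intel_gtt_get();

	/* each GTT entry maps one 4 KiB page of the aperture */
	return gtt->gtt_mappable_entries;
}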
/drivers/include/drm/radeon_drm.h
33,7 → 33,7
#ifndef __RADEON_DRM_H__
#define __RADEON_DRM_H__
 
#include "drm.h"
#include <drm/drm.h>
 
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h)
509,6 → 509,7
#define DRM_RADEON_GEM_SET_TILING 0x28
#define DRM_RADEON_GEM_GET_TILING 0x29
#define DRM_RADEON_GEM_BUSY 0x2a
#define DRM_RADEON_GEM_VA 0x2b
 
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
807,9 → 808,19
#define RADEON_TILING_MICRO 0x2
#define RADEON_TILING_SWAP_16BIT 0x4
#define RADEON_TILING_SWAP_32BIT 0x8
#define RADEON_TILING_SURFACE 0x10 /* this object requires a surface
* when mapped - i.e. front buffer */
/* this object requires a surface when mapped - i.e. front buffer */
#define RADEON_TILING_SURFACE 0x10
#define RADEON_TILING_MICRO_SQUARE 0x20
#define RADEON_TILING_EG_BANKW_SHIFT 8
#define RADEON_TILING_EG_BANKW_MASK 0xf
#define RADEON_TILING_EG_BANKH_SHIFT 12
#define RADEON_TILING_EG_BANKH_MASK 0xf
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf
#define RADEON_TILING_EG_TILE_SPLIT_SHIFT 24
#define RADEON_TILING_EG_TILE_SPLIT_MASK 0xf
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf
 
struct drm_radeon_gem_set_tiling {
uint32_t handle;
897,6 → 908,7
#define RADEON_CHUNK_ID_RELOCS 0x01
#define RADEON_CHUNK_ID_IB 0x02
#define RADEON_CHUNK_ID_FLAGS 0x03
#define RADEON_CHUNK_ID_CONST_IB 0x04
 
/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01
914,7 → 926,6
};
 
/* drm_radeon_cs_reloc.flags */
#define RADEON_RELOC_DONT_SYNC 0x01
 
struct drm_radeon_cs_reloc {
uint32_t handle;
951,6 → 962,10
#define RADEON_INFO_VA_START 0x0e
/* maximum size of ib using the virtual memory cs */
#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
/* max pipes - needed for compute shaders */
#define RADEON_INFO_MAX_PIPES 0x10
/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
#define RADEON_INFO_TIMESTAMP 0x11
 
struct drm_radeon_info {
uint32_t request;
/drivers/include/drm/ttm/ttm_bo_api.h
81,14 → 81,17
*/
 
struct ttm_mem_reg {
struct drm_mm_node *mm_node;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
uint32_t placement;
void *mm_node;
unsigned long start;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
uint32_t placement;
// struct ttm_bus_placement bus;
};
 
 
/**
* enum ttm_bo_type
*
/drivers/include/linux/asm/alternative.h
129,7 → 129,7
* use this macro(s) if you need more than one output parameter
* in alternative_io
*/
#define ASM_OUTPUT2(a, b) a, b
#define ASM_OUTPUT2(a...) a
 
struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
/drivers/include/linux/asm/atomic_32.h
266,8 → 266,8
u64 __aligned(8) counter;
} atomic64_t;
 
#define ATOMIC64_INIT(val) { (val) }
 
 
extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
 
/**
278,8 → 278,22
* Atomically xchgs the value of @ptr to @new_val and returns
* the old value.
*/
extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
static inline long long atomic64_xchg(atomic64_t *v, long long n)
{
long long o;
unsigned high = (unsigned)(n >> 32);
unsigned low = (unsigned)n;
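 
	/*
	 * cmpxchg8b compares edx:eax with *v; on a mismatch it loads the
	 * current value into edx:eax and clears ZF, so the loop retries
	 * with the freshly observed value until the new value is stored.
	 */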
 
asm volatile(
"1: \n\t"
"cmpxchg8b (%%esi) \n\t"
"jnz 1b \n\t"
:"=&A" (o)
:"S" (v), "b" (low), "c" (high)
: "memory", "cc");
return o;
}
 
/**
* atomic64_set - set atomic64 variable
* @ptr: pointer to type atomic64_t
287,8 → 301,21
*
* Atomically sets the value of @ptr to @new_val.
*/
extern void atomic64_set(atomic64_t *ptr, u64 new_val);
 
static inline void atomic64_set(atomic64_t *v, long long i)
{
unsigned high = (unsigned)(i >> 32);
unsigned low = (unsigned)i;
asm volatile (
"1: \n\t"
"cmpxchg8b (%%esi) \n\t"
"jnz 1b \n\t"
:
:"S" (v), "b" (low), "c" (high)
: "eax", "edx", "memory", "cc");
}
 
 
/**
* atomic64_read - read atomic64 variable
* @ptr: pointer to type atomic64_t
317,7 → 344,6
return res;
}
 
extern u64 atomic64_read(atomic64_t *ptr);
 
/**
* atomic64_add_return - add and return
/drivers/include/linux/asm/bitops.h
15,6 → 15,8
#include <linux/compiler.h>
#include <asm/alternative.h>
 
#define BIT_64(n) (U64_C(1) << (n))
 
/*
* These have to be done with inline assembly: that way the bit-setting
* is guaranteed to be atomic. All bit operations return 0 if the bit
262,6 → 264,13
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*
* Note: the operation is performed atomically with respect to
* the local CPU, but not other CPUs. Portable code should not
* rely on this behaviour.
* KVM relies on this behaviour on x86 for modifying memory that is also
* accessed from a hypervisor on the same CPU if running in a VM: don't change
* this without also updating arch/x86/kernel/kvm.c
*/
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
309,7 → 318,7
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
(addr[nr / BITS_PER_LONG])) != 0;
}
 
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
346,7 → 355,7
*/
static inline unsigned long __ffs(unsigned long word)
{
asm("bsf %1,%0"
asm("rep; bsf %1,%0"
: "=r" (word)
: "rm" (word));
return word;
360,7 → 369,7
*/
static inline unsigned long ffz(unsigned long word)
{
asm("bsf %1,%0"
asm("rep; bsf %1,%0"
: "=r" (word)
: "r" (~word));
return word;
380,6 → 389,8
return word;
}
 
#undef ADDR
 
#ifdef __KERNEL__
/**
* ffs - find first set bit in word
398,7 → 409,7
#ifdef CONFIG_X86_CMOV
asm("bsfl %1,%0\n\t"
"cmovzl %2,%0"
: "=r" (r) : "rm" (x), "r" (-1));
: "=&r" (r) : "rm" (x), "r" (-1));
#else
asm("bsfl %1,%0\n\t"
"jnz 1f\n\t"
/drivers/include/linux/asm/cpufeature.h
6,7 → 6,7
 
#include <asm/required-features.h>
 
#define NCAPINTS 9 /* N 32-bit words worth of info */
#define NCAPINTS 10 /* N 32-bit words worth of info */
 
/*
* Note: If the comment begins with a quoted string, that string is used
89,7 → 89,7
#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
#define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
/* 21 available, was AMD_C1E */
#define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */
97,6 → 97,7
#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */
#define X86_FEATURE_EAGER_FPU (3*32+29) /* "eagerfpu" Non lazy FPU restore */
 
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
114,6 → 115,7
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */
#define X86_FEATURE_PCID (4*32+17) /* Process Context Identifiers */
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */
120,10 → 122,13
#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */
#define X86_FEATURE_MOVBE (4*32+22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE_TIMER (4*32+24) /* Tsc deadline timer */
#define X86_FEATURE_AES (4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C (4*32+29) /* 16-bit fp conversions */
#define X86_FEATURE_RDRAND (4*32+30) /* The RDRAND instruction */
#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */
 
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
150,24 → 155,63
#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
#define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
#define X86_FEATURE_LWP (6*32+15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4 (6*32+16) /* 4 operands MAC instructions */
#define X86_FEATURE_TCE (6*32+17) /* translation cache extension */
#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
 
/*
* Auxiliary flags: Linux defined - For features scattered in various
* CPUID levels like 0x6, 0xA etc
* CPUID levels like 0x6, 0xA etc, word 7
*/
#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */
#define X86_FEATURE_CPB (7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB (7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */
#define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */
 
/* Virtualization flags: Linux defined */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */
#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */
#define X86_FEATURE_NPT (8*32+ 5) /* AMD Nested Page Table support */
#define X86_FEATURE_LBRV (8*32+ 6) /* AMD LBR Virtualization support */
#define X86_FEATURE_SVML (8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
#define X86_FEATURE_NRIPS (8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR (8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
#define X86_FEATURE_VMCBCLEAN (8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID (8*32+11) /* AMD flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
#define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
 
 
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
#define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
 
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
#include <linux/bitops.h>
178,8 → 222,7
#define test_cpu_cap(c, bit) \
test_bit(bit, (unsigned long *)((c)->x86_capability))
 
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && \
#define REQUIRED_MASK_BIT_SET(bit) \
( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
(((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
(((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
187,10 → 230,18
(((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
(((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
(((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \
? 1 : \
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \
(((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \
(((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )
 
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
test_cpu_cap(c, bit))
 
#define this_cpu_has(bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
 
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
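 
/*
 * A minimal sketch of the intended use: for a feature the build already
 * requires via the REQUIRED_MASK* words, cpu_has() collapses to a
 * compile-time 1 and the test_cpu_cap() lookup disappears; for any
 * other bit the capability word is tested at run time. The function
 * name below is illustrative only.
 */
static inline int cpu_supports_sse2_example(void)
{
	return boot_cpu_has(X86_FEATURE_XMM2);	/* constant 1 when SSE2 is required */
}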
 
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
219,7 → 270,9
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP)
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
247,8 → 300,14
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT)
#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
 
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
/drivers/include/linux/asm/div64.h
1,60 → 1,66
#ifndef _ASM_X86_DIV64_H
#define _ASM_X86_DIV64_H
 
#ifdef CONFIG_X86_32
 
#include <linux/types.h>
 
/*
* do_div() is NOT a C function. It wants to return
* two values (the quotient and the remainder), but
* since that doesn't work very well in C, what it
* does is:
*
* - modifies the 64-bit dividend _in_place_
* - returns the 32-bit remainder
*
* This ends up being the most efficient "calling
* convention" on x86.
*/
#define do_div(n, base) \
({ \
unsigned long __upper, __low, __high, __mod, __base; \
__base = (base); \
asm("":"=a" (__low), "=d" (__high) : "A" (n)); \
__upper = __high; \
if (__high) { \
__upper = __high % (__base); \
__high = __high / (__base); \
} \
asm("divl %2":"=a" (__low), "=d" (__mod) \
: "rm" (__base), "0" (__low), "1" (__upper)); \
asm("":"=A" (n) : "a" (__low), "d" (__high)); \
__mod; \
})
 
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
union {
u64 v64;
u32 v32[2];
} d = { dividend };
u32 upper;
 
upper = d.v32[1];
d.v32[1] = 0;
if (upper >= divisor) {
d.v32[1] = upper / divisor;
upper %= divisor;
}
asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
"rm" (divisor), "0" (d.v32[0]), "1" (upper));
return d.v64;
}
#define div_u64_rem div_u64_rem
 
#else
# include <asm-generic/div64.h>
#endif /* CONFIG_X86_32 */
 
#endif /* _ASM_X86_DIV64_H */
#ifndef _ASM_X86_DIV64_H
#define _ASM_X86_DIV64_H
 
#ifdef CONFIG_X86_32
 
#include <linux/types.h>
#include <linux/log2.h>
 
/*
* do_div() is NOT a C function. It wants to return
* two values (the quotient and the remainder), but
* since that doesn't work very well in C, what it
* does is:
*
* - modifies the 64-bit dividend _in_place_
* - returns the 32-bit remainder
*
* This ends up being the most efficient "calling
* convention" on x86.
*/
#define do_div(n, base) \
({ \
unsigned long __upper, __low, __high, __mod, __base; \
__base = (base); \
if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \
__mod = n & (__base - 1); \
n >>= ilog2(__base); \
} else { \
asm("" : "=a" (__low), "=d" (__high) : "A" (n));\
__upper = __high; \
if (__high) { \
__upper = __high % (__base); \
__high = __high / (__base); \
} \
asm("divl %2" : "=a" (__low), "=d" (__mod) \
: "rm" (__base), "0" (__low), "1" (__upper)); \
asm("" : "=A" (n) : "a" (__low), "d" (__high)); \
} \
__mod; \
})
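 
/*
 * A minimal sketch of the calling convention described above: the
 * 64-bit dividend is replaced in place by the quotient and the 32-bit
 * remainder is the macro's value. Names are illustrative only.
 */
static inline u32 do_div_example(void)
{
	u64 bytes = 1000000ULL;		/* dividend, modified in place  */
	u32 rem;

	rem = do_div(bytes, 4096);	/* bytes is now 244, rem is 576 */
	return rem;
}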
 
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
union {
u64 v64;
u32 v32[2];
} d = { dividend };
u32 upper;
 
upper = d.v32[1];
d.v32[1] = 0;
if (upper >= divisor) {
d.v32[1] = upper / divisor;
upper %= divisor;
}
asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
"rm" (divisor), "0" (d.v32[0]), "1" (upper));
return d.v64;
}
#define div_u64_rem div_u64_rem
 
#else
# include <asm-generic/div64.h>
#endif /* CONFIG_X86_32 */
 
#endif /* _ASM_X86_DIV64_H */
/drivers/include/linux/asm/required-features.h
84,5 → 84,7
#define REQUIRED_MASK5 0
#define REQUIRED_MASK6 0
#define REQUIRED_MASK7 0
#define REQUIRED_MASK8 0
#define REQUIRED_MASK9 0
 
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
/drivers/include/linux/backlight.h
--- include/linux/bitops.h (revision 3030)
+++ include/linux/bitops.h (revision 3031)
@@ -26,6 +26,23 @@
(bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+#define for_each_clear_bit(bit, addr, size) \
+ for ((bit) = find_first_zero_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_clear_bit() but use bit as value to start with */
+#define for_each_clear_bit_from(bit, addr, size) \
+ for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
static __inline__ int get_bitmask_order(unsigned int count)
{
int order;
@@ -50,6 +67,26 @@
}
/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 rol64(__u64 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (64 - shift));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 ror64(__u64 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (64 - shift));
+}
+
+/**
* rol32 - rotate a 32-bit value left
* @word: value to rotate
* @shift: bits to roll
/drivers/include/linux/bug.h
0,0 → 1,12
#ifndef _ASM_GENERIC_BUG_H
#define _ASM_GENERIC_BUG_H
 
 
 
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
unlikely(__ret_warn_on); \
})
 
 
#endif
/drivers/include/linux/compiler-gcc.h
82,7 → 82,8
*/
#define __pure __attribute__((pure))
#define __aligned(x) __attribute__((aligned(x)))
#define __printf(a,b) __attribute__((format(printf,a,b)))
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
#define noinline __attribute__((noinline))
#define __attribute_const__ __attribute__((__const__))
#define __maybe_unused __attribute__((unused))
/drivers/include/linux/compiler-gcc4.h
29,6 → 29,7
the kernel context */
#define __cold __attribute__((__cold__))
 
#define __linktime_error(message) __attribute__((__error__(message)))
 
#if __GNUC_MINOR__ >= 5
/*
48,10 → 49,17
#endif
#endif
 
#if __GNUC_MINOR__ >= 6
/*
* Tell the optimizer that something else uses this function or variable.
*/
#define __visible __attribute__((externally_visible))
#endif
 
#if __GNUC_MINOR__ > 0
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
#if __GNUC_MINOR__ >= 4 && !defined(__CHECKER__)
#if __GNUC_MINOR__ >= 3 && !defined(__CHECKER__)
#define __compiletime_warning(message) __attribute__((warning(message)))
#define __compiletime_error(message) __attribute__((error(message)))
#endif
/drivers/include/linux/compiler.h
236,7 → 236,7
 
/*
* Rather than using noinline to prevent stack consumption, use
* noinline_for_stack instead. For documentaiton reasons.
* noinline_for_stack instead. For documentation reasons.
*/
#define noinline_for_stack noinline
 
278,6 → 278,10
# define __section(S) __attribute__ ((__section__(#S)))
#endif
 
#ifndef __visible
#define __visible
#endif
 
/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
293,7 → 297,9
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif
 
#ifndef __linktime_error
# define __linktime_error(message)
#endif
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
/drivers/include/linux/delay.h
--- include/linux/dmapool.h (revision 3030)
+++ include/linux/dmapool.h (revision 3031)
@@ -21,6 +21,12 @@
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
+/*
+ * Managed DMA pool
+ */
+struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
+ size_t size, size_t align, size_t allocation);
+void dmam_pool_destroy(struct dma_pool *pool);
#endif
/drivers/include/linux/errno-base.h
0,0 → 1,39
#ifndef _ASM_GENERIC_ERRNO_BASE_H
#define _ASM_GENERIC_ERRNO_BASE_H
 
#define EPERM 1 /* Operation not permitted */
#define ENOENT 2 /* No such file or directory */
#define ESRCH 3 /* No such process */
#define EINTR 4 /* Interrupted system call */
#define EIO 5 /* I/O error */
#define ENXIO 6 /* No such device or address */
#define E2BIG 7 /* Argument list too long */
#define ENOEXEC 8 /* Exec format error */
#define EBADF 9 /* Bad file number */
#define ECHILD 10 /* No child processes */
#define EAGAIN 11 /* Try again */
#define ENOMEM 12 /* Out of memory */
#define EACCES 13 /* Permission denied */
#define EFAULT 14 /* Bad address */
#define ENOTBLK 15 /* Block device required */
#define EBUSY 16 /* Device or resource busy */
#define EEXIST 17 /* File exists */
#define EXDEV 18 /* Cross-device link */
#define ENODEV 19 /* No such device */
#define ENOTDIR 20 /* Not a directory */
#define EISDIR 21 /* Is a directory */
#define EINVAL 22 /* Invalid argument */
#define ENFILE 23 /* File table overflow */
#define EMFILE 24 /* Too many open files */
#define ENOTTY 25 /* Not a typewriter */
#define ETXTBSY 26 /* Text file busy */
#define EFBIG 27 /* File too large */
#define ENOSPC 28 /* No space left on device */
#define ESPIPE 29 /* Illegal seek */
#define EROFS 30 /* Read-only file system */
#define EMLINK 31 /* Too many links */
#define EPIPE 32 /* Broken pipe */
#define EDOM 33 /* Math argument out of domain of func */
#define ERANGE 34 /* Math result not representable */
 
#endif
/drivers/include/linux/export.h
0,0 → 1,19
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
/*
* Export symbols from the kernel to modules. Forked from module.h
* to reduce the amount of pointless cruft we feed to gcc when only
* exporting a simple symbol or two.
*
* If you feel the need to add #include <linux/foo.h> to this file
* then you are doing something wrong and should go away silently.
*/
#define EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym)
#define EXPORT_SYMBOL_GPL_FUTURE(sym)
#define EXPORT_UNUSED_SYMBOL(sym)
#define EXPORT_UNUSED_SYMBOL_GPL(sym)
 
#define THIS_MODULE ((struct module *)0)
 
#endif /* _LINUX_EXPORT_H */
/drivers/include/linux/fb.h
549,6 → 549,10
#define FB_EVENT_FB_UNBIND 0x0E
/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
/* A hardware display blank early change occurred */
#define FB_EARLY_EVENT_BLANK 0x10
/* A hardware display blank revert early change occurred */
#define FB_R_EARLY_EVENT_BLANK 0x11
 
struct fb_event {
struct fb_info *info;
599,6 → 603,7
struct mutex lock; /* mutex that protects the page list */
struct list_head pagelist; /* list of touched pages */
/* callback */
void (*first_io)(struct fb_info *info);
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
};
#endif
990,6 → 995,7
/* drivers/video/fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern int unregister_framebuffer(struct fb_info *fb_info);
extern int unlink_framebuffer(struct fb_info *fb_info);
extern void remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
1112,6 → 1118,7
 
/* drivers/video/fbcmap.c */
extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp);
extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags);
extern void fb_dealloc_cmap(struct fb_cmap *cmap);
extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to);
extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to);
1139,6 → 1146,7
 
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
extern const struct fb_videomode cea_modes[64];
 
struct fb_modelist {
struct list_head list;
/drivers/include/linux/i2c-algo-bit.h
15,7 → 15,8
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301 USA. */
/* ------------------------------------------------------------------------- */
 
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
49,5 → 50,6
 
int i2c_bit_add_bus(struct i2c_adapter *);
int i2c_bit_add_numbered_bus(struct i2c_adapter *);
extern const struct i2c_algorithm i2c_bit_algo;
 
#endif /* _LINUX_I2C_ALGO_BIT_H */
/drivers/include/linux/i2c.h
17,12 → 17,12
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301 USA. */
/* ------------------------------------------------------------------------- */
 
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
Frodo Looijaard <frodol@dds.nl> */
 
#ifndef _LINUX_I2C_H
#define _LINUX_I2C_H
 
32,6 → 32,8
#include <linux/i2c-id.h>
#include <linux/mod_devicetable.h>
 
extern struct bus_type i2c_bus_type;
extern struct device_type i2c_adapter_type;
 
/* --- General options ------------------------------------------------ */
 
70,7 → 72,7
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver.
*
* For automatic device detection, both @detect and @address_data must
* For automatic device detection, both @detect and @address_list must
* be defined. @class should also be set, otherwise only devices forced
* with module parameters will be created. The detect function must
* fill at least the name field of the i2c_board_info structure it is
271,6 → 273,8
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
/* Must match I2C_M_STOP|IGNORE_NAK */
 
/* i2c adapter classes (bitmask) */
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
/drivers/include/linux/ioport.h
35,8 → 35,9
#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
 
#define IORESOURCE_TYPE_BITS 0x00001f00 /* Resource type */
#define IORESOURCE_IO 0x00000100
#define IORESOURCE_IO 0x00000100 /* PCI/ISA I/O ports */
#define IORESOURCE_MEM 0x00000200
#define IORESOURCE_REG 0x00000300 /* Register offsets */
#define IORESOURCE_IRQ 0x00000400
#define IORESOURCE_DMA 0x00000800
#define IORESOURCE_BUS 0x00001000
/drivers/include/linux/jiffies.h
71,16 → 71,10
/* a value TUSEC for TICK_USEC (can be set by adjtimex) */
#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
 
#define jiffies GetTimerTicks()
 
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
#else
static inline u64 get_jiffies_64(void)
{
return (u64)jiffies;
return (u64)GetTimerTicks();
}
#endif
 
/*
* These inlines deal with timer wrapping correctly. You are
295,7 → 289,13
extern unsigned long timeval_to_jiffies(const struct timeval *value);
extern void jiffies_to_timeval(const unsigned long jiffies,
struct timeval *value);
 
extern clock_t jiffies_to_clock_t(unsigned long x);
static inline clock_t jiffies_delta_to_clock_t(long delta)
{
return jiffies_to_clock_t(max(0L, delta));
}
 
extern unsigned long clock_t_to_jiffies(unsigned long x);
extern u64 jiffies_64_to_clock_t(u64 x);
extern u64 nsec_to_clock_t(u64 x);
/drivers/include/linux/kernel.h
29,6 → 29,7
#define LLONG_MAX ((long long)(~0ULL>>1))
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)
 
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
306,7 → 307,10
writel(val >> 32, addr+4);
}
 
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
 
 
#define mmiowb() barrier()
 
#define dev_err(dev, format, arg...) \
329,6 → 333,34
unsigned int dma_length;
};
 
struct sg_table {
struct scatterlist *sgl; /* the list */
unsigned int nents; /* number of mapped entries */
unsigned int orig_nents; /* original size of list */
};
 
#define SG_MAX_SINGLE_ALLOC (4096 / sizeof(struct scatterlist))
 
struct scatterlist *sg_next(struct scatterlist *sg);
 
#define sg_dma_address(sg) ((sg)->dma_address)
#define sg_dma_len(sg) ((sg)->length)
 
#define sg_is_chain(sg) ((sg)->page_link & 0x01)
#define sg_is_last(sg) ((sg)->page_link & 0x02)
#define sg_chain_ptr(sg) \
((struct scatterlist *) ((sg)->page_link & ~0x03))
 
static inline addr_t sg_page(struct scatterlist *sg)
{
return (addr_t)((sg)->page_link & ~0x3);
}
 
#define for_each_sg(sglist, sg, nr, __i) \
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
 
 
 
struct page
{
unsigned int addr;
347,6 → 379,10
*/
};
 
struct pagelist {
dma_addr_t *page;
unsigned int nents;
};
 
#endif
 
/drivers/include/linux/log2.h
0,0 → 1,208
/* Integer base 2 logarithm calculation
*
* Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
 
#ifndef _LINUX_LOG2_H
#define _LINUX_LOG2_H
 
#include <linux/types.h>
#include <linux/bitops.h>
 
/*
* deal with unrepresentable constant logarithms
*/
extern __attribute__((const, noreturn))
int ____ilog2_NaN(void);
 
/*
* non-constant log of base 2 calculators
* - the arch may override these in asm/bitops.h if they can be implemented
* more efficiently than using fls() and fls64()
* - the arch is not required to handle n==0 if implementing the fallback
*/
#ifndef CONFIG_ARCH_HAS_ILOG2_U32
static inline __attribute__((const))
int __ilog2_u32(u32 n)
{
return fls(n) - 1;
}
#endif
 
#ifndef CONFIG_ARCH_HAS_ILOG2_U64
static inline __attribute__((const))
int __ilog2_u64(u64 n)
{
return fls64(n) - 1;
}
#endif
 
/*
* Determine whether some value is a power of two, where zero is
* *not* considered a power of two.
*/
 
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
 
/*
* round up to nearest power of two
*/
static inline __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
{
return 1UL << fls_long(n - 1);
}
 
/*
* round down to nearest power of two
*/
static inline __attribute__((const))
unsigned long __rounddown_pow_of_two(unsigned long n)
{
return 1UL << (fls_long(n) - 1);
}
 
/**
* ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
* @n - parameter
*
* constant-capable log of base 2 calculation
* - this can be used to initialise global variables from constant data, hence
* the massive ternary operator construction
*
* selects the appropriately-sized optimised version depending on sizeof(n)
*/
#define ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
(n) < 1 ? ____ilog2_NaN() : \
(n) & (1ULL << 63) ? 63 : \
(n) & (1ULL << 62) ? 62 : \
(n) & (1ULL << 61) ? 61 : \
(n) & (1ULL << 60) ? 60 : \
(n) & (1ULL << 59) ? 59 : \
(n) & (1ULL << 58) ? 58 : \
(n) & (1ULL << 57) ? 57 : \
(n) & (1ULL << 56) ? 56 : \
(n) & (1ULL << 55) ? 55 : \
(n) & (1ULL << 54) ? 54 : \
(n) & (1ULL << 53) ? 53 : \
(n) & (1ULL << 52) ? 52 : \
(n) & (1ULL << 51) ? 51 : \
(n) & (1ULL << 50) ? 50 : \
(n) & (1ULL << 49) ? 49 : \
(n) & (1ULL << 48) ? 48 : \
(n) & (1ULL << 47) ? 47 : \
(n) & (1ULL << 46) ? 46 : \
(n) & (1ULL << 45) ? 45 : \
(n) & (1ULL << 44) ? 44 : \
(n) & (1ULL << 43) ? 43 : \
(n) & (1ULL << 42) ? 42 : \
(n) & (1ULL << 41) ? 41 : \
(n) & (1ULL << 40) ? 40 : \
(n) & (1ULL << 39) ? 39 : \
(n) & (1ULL << 38) ? 38 : \
(n) & (1ULL << 37) ? 37 : \
(n) & (1ULL << 36) ? 36 : \
(n) & (1ULL << 35) ? 35 : \
(n) & (1ULL << 34) ? 34 : \
(n) & (1ULL << 33) ? 33 : \
(n) & (1ULL << 32) ? 32 : \
(n) & (1ULL << 31) ? 31 : \
(n) & (1ULL << 30) ? 30 : \
(n) & (1ULL << 29) ? 29 : \
(n) & (1ULL << 28) ? 28 : \
(n) & (1ULL << 27) ? 27 : \
(n) & (1ULL << 26) ? 26 : \
(n) & (1ULL << 25) ? 25 : \
(n) & (1ULL << 24) ? 24 : \
(n) & (1ULL << 23) ? 23 : \
(n) & (1ULL << 22) ? 22 : \
(n) & (1ULL << 21) ? 21 : \
(n) & (1ULL << 20) ? 20 : \
(n) & (1ULL << 19) ? 19 : \
(n) & (1ULL << 18) ? 18 : \
(n) & (1ULL << 17) ? 17 : \
(n) & (1ULL << 16) ? 16 : \
(n) & (1ULL << 15) ? 15 : \
(n) & (1ULL << 14) ? 14 : \
(n) & (1ULL << 13) ? 13 : \
(n) & (1ULL << 12) ? 12 : \
(n) & (1ULL << 11) ? 11 : \
(n) & (1ULL << 10) ? 10 : \
(n) & (1ULL << 9) ? 9 : \
(n) & (1ULL << 8) ? 8 : \
(n) & (1ULL << 7) ? 7 : \
(n) & (1ULL << 6) ? 6 : \
(n) & (1ULL << 5) ? 5 : \
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
(n) & (1ULL << 1) ? 1 : \
(n) & (1ULL << 0) ? 0 : \
____ilog2_NaN() \
) : \
(sizeof(n) <= 4) ? \
__ilog2_u32(n) : \
__ilog2_u64(n) \
)
 
/**
* roundup_pow_of_two - round the given value up to nearest power of two
* @n - parameter
*
* round the given value up to the nearest power of two
* - the result is undefined when n == 0
* - this can be used to initialise global variables from constant data
*/
#define roundup_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
(n == 1) ? 1 : \
(1UL << (ilog2((n) - 1) + 1)) \
) : \
__roundup_pow_of_two(n) \
)
 
/**
* rounddown_pow_of_two - round the given value down to nearest power of two
* @n - parameter
*
* round the given value down to the nearest power of two
* - the result is undefined when n == 0
* - this can be used to initialise global variables from constant data
*/
#define rounddown_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
(1UL << ilog2(n))) : \
__rounddown_pow_of_two(n) \
)
 
/**
* order_base_2 - calculate the (rounded up) base 2 order of the argument
* @n: parameter
*
* The first few values calculated by this routine:
* ob2(0) = 0
* ob2(1) = 0
* ob2(2) = 1
* ob2(3) = 2
* ob2(4) = 2
* ob2(5) = 3
* ... and so on.
*/
 
#define order_base_2(n) ilog2(roundup_pow_of_two(n))
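 
/*
 * A minimal sketch of the compile-time use described above: with
 * constant arguments these macros collapse to integer constants and
 * may initialise globals. The variable names are illustrative only.
 */
static const int example_page_shift = ilog2(4096);		/* collapses to 12     */
static const int example_ring_pages = roundup_pow_of_two(96);	/* rounds 96 up to 128 */
/* order_base_2(96) similarly evaluates to ilog2(128) == 7 */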
 
#endif /* _LINUX_LOG2_H */
/drivers/include/linux/math64.h
0,0 → 1,121
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H
 
#include <linux/types.h>
#include <asm/div64.h>
 
#if BITS_PER_LONG == 64
 
#define div64_long(x,y) div64_s64((x),(y))
 
/**
* div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
*
* This is commonly provided by 32bit archs to provide an optimized 64bit
* divide.
*/
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
*remainder = dividend % divisor;
return dividend / divisor;
}
 
/**
* div_s64_rem - signed 64bit divide with 32bit divisor with remainder
*/
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
*remainder = dividend % divisor;
return dividend / divisor;
}
 
/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
*/
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
return dividend / divisor;
}
 
/**
* div64_s64 - signed 64bit divide with 64bit divisor
*/
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
return dividend / divisor;
}
 
#elif BITS_PER_LONG == 32
 
#define div64_long(x,y) div_s64((x),(y))
 
#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
*remainder = do_div(dividend, divisor);
return dividend;
}
#endif
 
#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif
 
#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif
 
#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif
 
#endif /* BITS_PER_LONG */
 
/**
* div_u64 - unsigned 64bit divide with 32bit divisor
*
* This is the most common 64bit divide and should be used if possible,
* as many 32bit archs can optimize this variant better than a full 64bit
* divide.
*/
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
u32 remainder;
return div_u64_rem(dividend, divisor, &remainder);
}
#endif
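 
/*
 * A minimal sketch of the preference stated above: when the divisor
 * fits in 32 bits, div_u64() lets 32-bit architectures avoid a full
 * 64-by-64 division. Names are illustrative only.
 */
static inline u64 average_ns_example(u64 total_ns, u32 samples)
{
	return samples ? div_u64(total_ns, samples) : 0;
}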
 
/**
* div_s64 - signed 64bit divide with 32bit divisor
*/
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
s32 remainder;
return div_s64_rem(dividend, divisor, &remainder);
}
#endif
 
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
 
static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
u32 ret = 0;
 
while (dividend >= divisor) {
/* The following asm() prevents the compiler from
optimising this loop into a modulo operation. */
asm("" : "+rm"(dividend));
 
dividend -= divisor;
ret++;
}
 
*remainder = dividend;
 
return ret;
}
 
#endif /* _LINUX_MATH64_H */
/drivers/include/linux/mod_devicetable.h
78,6 → 78,9
* of a given interface; other interfaces may support other classes.
* @bInterfaceSubClass: Subclass of interface; associated with bInterfaceClass.
* @bInterfaceProtocol: Protocol of interface; associated with bInterfaceClass.
* @bInterfaceNumber: Number of interface; composite devices may use
* fixed interface numbers to differentiate between vendor-specific
* interfaces.
* @driver_info: Holds information used by the driver. Usually it holds
* a pointer to a descriptor understood by the driver, or perhaps
* device flags.
130,12 → 133,15
#define USB_DEVICE_ID_MATCH_INT_CLASS 0x0080
#define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
#define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
#define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
 
#define HID_ANY_ID (~0)
#define HID_BUS_ANY 0xffff
#define HID_GROUP_ANY 0x0000
 
struct hid_device_id {
__u16 bus;
__u16 pad1;
__u16 group;
__u32 vendor;
__u32 product;
kernel_ulong_t driver_data;
222,7 → 228,7
char type[32];
char compatible[128];
#ifdef __KERNEL__
void *data;
const void *data;
#else
kernel_ulong_t data;
#endif
/drivers/include/linux/module.h
11,15 → 11,14
#include <linux/kernel.h>
 
 
#define EXPORT_SYMBOL(x)
 
#define MODULE_FIRMWARE(x)
#define MODULE_AUTHOR(x);
#define MODULE_DESCRIPTION(x);
#define MODULE_LICENSE(x);
 
#define MODULE_PARM_DESC(_parm, desc)
 
#define MODULE_AUTHOR(x)
#define MODULE_DESCRIPTION(x)
#define MODULE_LICENSE(x)
 
struct module {};
 
#endif /* _LINUX_MODULE_H */
/drivers/include/linux/moduleparam.h
0,0 → 1,3
 
#define MODULE_PARM_DESC(_parm, desc)
#define module_param_named(name, value, type, perm)
/drivers/include/linux/pci.h
13,11 → 13,10
* PCI to PCI Bridge Specification
* PCI System Design Guide
*/
 
#ifndef LINUX_PCI_H
#define LINUX_PCI_H
 
#include <types.h>
#include <linux/types.h>
#include <list.h>
#include <linux/pci_regs.h> /* The pci register defines */
#include <ioport.h>
276,6 → 275,20
#define PCI_D3cold ((pci_power_t __force) 4)
#define PCI_UNKNOWN ((pci_power_t __force) 5)
#define PCI_POWER_ERROR ((pci_power_t __force) -1)
 
/* Remember to update this when the list above changes! */
extern const char *pci_power_names[];
 
static inline const char *pci_power_name(pci_power_t state)
{
return pci_power_names[1 + (int) state];
}
 
#define PCI_PM_D2_DELAY 200
#define PCI_PM_D3_WAIT 10
#define PCI_PM_D3COLD_WAIT 100
#define PCI_PM_BUS_WAIT 50
 
/** The pci_channel state describes connectivity between the CPU and
* the pci device. If some PCI bus between here and the pci device
* has crashed or locked up, this info is reflected here.
346,9 → 359,10
u8 revision; /* PCI revision, low byte of class word */
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
u8 pcie_cap; /* PCI-E capability offset */
u8 pcie_type; /* PCI-E device/port type */
u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */
u8 rom_base_reg; /* which config register controls the ROM */
u8 pin; /* which interrupt pin this device uses */
u16 pcie_flags_reg; /* cached PCI-E Capabilities Register */
 
// struct pci_driver *driver; /* which driver has allocated this device */
uint64_t dma_mask; /* Mask of the bits of bus address this
367,14 → 381,25
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
unsigned int pme_interrupt:1;
unsigned int pme_poll:1; /* Poll device's PME status bit */
unsigned int d1_support:1; /* Low power state D1 is supported */
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* Only allow D0 and D3 */
unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
unsigned int no_d3cold:1; /* D3cold is forbidden */
unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
unsigned int mmio_always_on:1; /* disallow turning off io/mem
decoding during bar sizing */
unsigned int wakeup_prepared:1;
unsigned int runtime_d3cold:1; /* whether to go through runtime
D3cold, not set for devices
powered on/off by the
corresponding bridge */
unsigned int d3_delay; /* D3->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
 
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */
#endif
 
pci_channel_state_t error_state; /* current connectivity state */
struct device dev; /* Generic device interface */
387,7 → 412,6
*/
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
resource_size_t fw_addr[DEVICE_COUNT_RESOURCE]; /* FW-assigned addr */
 
/* These fields are used by common fixups */
unsigned int transparent:1; /* Transparent PCI bridge */
396,7 → 420,7
unsigned int is_added:1;
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
unsigned int block_cfg_access:1; /* config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
unsigned int msi_enabled:1;
411,15 → 435,15
unsigned int is_virtfn:1;
unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
// pci_dev_flags_t dev_flags;
// atomic_t enable_cnt; /* pci_enable_device has been called */
unsigned int __aer_firmware_first_valid:1;
unsigned int __aer_firmware_first:1;
unsigned int broken_intx_masking:1;
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
// pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
 
// u32 saved_config_space[16]; /* config space saved at suspend time */
// struct hlist_head saved_cap_space;
// struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
// int rom_attr_enabled; /* has display of the rom attribute been enabled? */
// struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
// struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
 
 
};
 
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
443,6 → 467,7
struct list_head slots; /* list of slots on this bus */
struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
struct list_head resources; /* address space routed to this bus */
struct resource busn_res; /* bus numbers routed to this bus */
 
struct pci_ops *ops; /* configuration access functions */
void *sysdata; /* hook for sys-specific extension */
450,8 → 475,6
 
unsigned char number; /* bus number */
unsigned char primary; /* number of primary bridge */
unsigned char secondary; /* number of secondary bridge */
unsigned char subordinate; /* max number of subordinate buses */
unsigned char max_bus_speed; /* enum pci_bus_speed */
unsigned char cur_bus_speed; /* enum pci_bus_speed */
 
571,6 → 594,16
return !!pci_pcie_cap(dev);
}
 
/**
* pci_pcie_type - get the PCIe device/port type
* @dev: PCI device
*/
static inline int pci_pcie_type(const struct pci_dev *dev)
{
return (dev->pcie_flags_reg & PCI_EXP_FLAGS_TYPE) >> 4;
}
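 
/*
 * A minimal sketch of using the accessor above: the cached
 * pcie_flags_reg lets callers test the port type without touching
 * config space. PCI_EXP_TYPE_ROOT_PORT and PCI_EXP_TYPE_DOWNSTREAM
 * come from <linux/pci_regs.h>; the function name is illustrative
 * only.
 */
static inline int pci_is_downstream_port_example(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_DOWNSTREAM;
}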
 
 
static inline int pci_iov_init(struct pci_dev *dev)
{
return -ENODEV;
/drivers/include/linux/pci_regs.h
26,6 → 26,7
* Under PCI, each device has 256 bytes of configuration address space,
* of which the first 64 bytes are standardized as follows:
*/
#define PCI_STD_HEADER_SIZEOF 64
#define PCI_VENDOR_ID 0x00 /* 16 bits */
#define PCI_DEVICE_ID 0x02 /* 16 bits */
#define PCI_COMMAND 0x04 /* 16 bits */
125,7 → 126,8
#define PCI_IO_RANGE_TYPE_MASK 0x0fUL /* I/O bridging type */
#define PCI_IO_RANGE_TYPE_16 0x00
#define PCI_IO_RANGE_TYPE_32 0x01
#define PCI_IO_RANGE_MASK (~0x0fUL)
#define PCI_IO_RANGE_MASK (~0x0fUL) /* Standard 4K I/O windows */
#define PCI_IO_1K_RANGE_MASK (~0x03UL) /* Intel 1K I/O windows */
#define PCI_SEC_STATUS 0x1e /* Secondary status register, only bit 14 used */
#define PCI_MEMORY_BASE 0x20 /* Memory range behind */
#define PCI_MEMORY_LIMIT 0x22
209,9 → 211,12
#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
#define PCI_CAP_ID_SSVID 0x0D /* Bridge subsystem vendor/device ID */
#define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */
#define PCI_CAP_ID_SECDEV 0x0F /* Secure Device */
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
#define PCI_CAP_ID_SATA 0x12 /* SATA Data/Index Conf. */
#define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */
#define PCI_CAP_ID_MAX PCI_CAP_ID_AF
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */
#define PCI_CAP_SIZEOF 4
276,6 → 281,7
#define PCI_VPD_ADDR_MASK 0x7fff /* Address mask */
#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */
#define PCI_VPD_DATA 4 /* 32-bits of data returned here */
#define PCI_CAP_VPD_SIZEOF 8
 
/* Slot Identification */
 
297,8 → 303,10
#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
#define PCI_MSI_PENDING_32 16 /* Pending intrs for 32-bit devices */
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
#define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */
 
/* MSI-X registers */
#define PCI_MSIX_FLAGS 2
308,6 → 316,7
#define PCI_MSIX_TABLE 4
#define PCI_MSIX_PBA 8
#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
 
/* MSI-X entry's format */
#define PCI_MSIX_ENTRY_SIZE 16
338,6 → 347,7
#define PCI_AF_CTRL_FLR 0x01
#define PCI_AF_STATUS 5
#define PCI_AF_STATUS_TP 0x01
#define PCI_CAP_AF_SIZEOF 6 /* size of AF registers */
 
/* PCI-X registers */
 
374,6 → 384,10
#define PCI_X_STATUS_SPL_ERR 0x20000000 /* Rcvd Split Completion Error Msg */
#define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */
#define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */
#define PCI_X_ECC_CSR 8 /* ECC control and status */
#define PCI_CAP_PCIX_SIZEOF_V0 8 /* size of registers for Version 0 */
#define PCI_CAP_PCIX_SIZEOF_V1 24 /* size for Version 1 */
#define PCI_CAP_PCIX_SIZEOF_V2 PCI_CAP_PCIX_SIZEOF_V1 /* Same for v2 */
 
/* PCI Bridge Subsystem ID registers */
 
391,8 → 405,9
#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
#define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIE Bridge */
#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
#define PCI_EXP_TYPE_RC_EC 0x10 /* Root Complex Event Collector */
#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
#define PCI_EXP_DEVCAP 4 /* Device capabilities */
461,6 → 476,7
#define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */
#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */
#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints end here */
#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */
#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */
506,6 → 522,12
#define PCI_EXP_RTSTA 32 /* Root Status */
#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
/*
* Note that the following PCI Express 'Capability Structure' registers
* were introduced with 'Capability Version' 0x2 (v2). These registers
* do not exist on devices with Capability Version 1. Use pci_pcie_cap2()
* to use these fields safely.
*/
#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */
#define PCI_EXP_DEVCAP2_LTR 0x800 /* Latency tolerance reporting */
520,7 → 542,14
#define PCI_EXP_OBFF_MSGA_EN 0x2000 /* OBFF enable with Message type A */
#define PCI_EXP_OBFF_MSGB_EN 0x4000 /* OBFF enable with Message type B */
#define PCI_EXP_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints end here */
#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x01 /* Current Link Speed 2.5GT/s */
#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x02 /* Current Link Speed 5.0GT/s */
#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x04 /* Current Link Speed 8.0GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
 
/* Extended Capabilities (PCI-X 2.0 and Express) */
528,21 → 557,43
#define PCI_EXT_CAP_VER(header) ((header >> 16) & 0xf)
#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc)
 
#define PCI_EXT_CAP_ID_ERR 1
#define PCI_EXT_CAP_ID_VC 2
#define PCI_EXT_CAP_ID_DSN 3
#define PCI_EXT_CAP_ID_PWR 4
#define PCI_EXT_CAP_ID_VNDR 11
#define PCI_EXT_CAP_ID_ACS 13
#define PCI_EXT_CAP_ID_ARI 14
#define PCI_EXT_CAP_ID_ATS 15
#define PCI_EXT_CAP_ID_SRIOV 16
#define PCI_EXT_CAP_ID_LTR 24
#define PCI_EXT_CAP_ID_ERR 0x01 /* Advanced Error Reporting */
#define PCI_EXT_CAP_ID_VC 0x02 /* Virtual Channel Capability */
#define PCI_EXT_CAP_ID_DSN 0x03 /* Device Serial Number */
#define PCI_EXT_CAP_ID_PWR 0x04 /* Power Budgeting */
#define PCI_EXT_CAP_ID_RCLD 0x05 /* Root Complex Link Declaration */
#define PCI_EXT_CAP_ID_RCILC 0x06 /* Root Complex Internal Link Control */
#define PCI_EXT_CAP_ID_RCEC 0x07 /* Root Complex Event Collector */
#define PCI_EXT_CAP_ID_MFVC 0x08 /* Multi-Function VC Capability */
#define PCI_EXT_CAP_ID_VC9 0x09 /* same as _VC */
#define PCI_EXT_CAP_ID_RCRB 0x0A /* Root Complex RB? */
#define PCI_EXT_CAP_ID_VNDR 0x0B /* Vendor Specific */
#define PCI_EXT_CAP_ID_CAC 0x0C /* Config Access - obsolete */
#define PCI_EXT_CAP_ID_ACS 0x0D /* Access Control Services */
#define PCI_EXT_CAP_ID_ARI 0x0E /* Alternate Routing ID */
#define PCI_EXT_CAP_ID_ATS 0x0F /* Address Translation Services */
#define PCI_EXT_CAP_ID_SRIOV 0x10 /* Single Root I/O Virtualization */
#define PCI_EXT_CAP_ID_MRIOV 0x11 /* Multi Root I/O Virtualization */
#define PCI_EXT_CAP_ID_MCAST 0x12 /* Multicast */
#define PCI_EXT_CAP_ID_PRI 0x13 /* Page Request Interface */
#define PCI_EXT_CAP_ID_AMD_XXX 0x14 /* reserved for AMD */
#define PCI_EXT_CAP_ID_REBAR 0x15 /* resizable BAR */
#define PCI_EXT_CAP_ID_DPA 0x16 /* dynamic power alloc */
#define PCI_EXT_CAP_ID_TPH 0x17 /* TPH request */
#define PCI_EXT_CAP_ID_LTR 0x18 /* latency tolerance reporting */
#define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe */
#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */
#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */
#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PASID
 
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
 
/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
#define PCI_ERR_UNC_TRAIN 0x00000001 /* Training */
#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */
#define PCI_ERR_UNC_FCP 0x00002000 /* Flow Control Protocol */
#define PCI_ERR_UNC_COMP_TIME 0x00004000 /* Completion Timeout */
552,6 → 603,11
#define PCI_ERR_UNC_MALF_TLP 0x00040000 /* Malformed TLP */
#define PCI_ERR_UNC_ECRC 0x00080000 /* ECRC Error Status */
#define PCI_ERR_UNC_UNSUP 0x00100000 /* Unsupported Request */
#define PCI_ERR_UNC_ACSV 0x00200000 /* ACS Violation */
#define PCI_ERR_UNC_INTN 0x00400000 /* internal error */
#define PCI_ERR_UNC_MCBTLP 0x00800000 /* MC blocked TLP */
#define PCI_ERR_UNC_ATOMEG 0x01000000 /* Atomic egress blocked */
#define PCI_ERR_UNC_TLPPRE 0x02000000 /* TLP prefix blocked */
#define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */
/* Same bits as above */
#define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */
562,6 → 618,9
#define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */
#define PCI_ERR_COR_REP_ROLL 0x00000100 /* REPLAY_NUM Rollover */
#define PCI_ERR_COR_REP_TIMER 0x00001000 /* Replay Timer Timeout */
#define PCI_ERR_COR_ADV_NFAT 0x00002000 /* Advisory Non-Fatal */
#define PCI_ERR_COR_INTERNAL 0x00004000 /* Corrected Internal */
#define PCI_ERR_COR_LOG_OVER 0x00008000 /* Header Log Overflow */
#define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */
/* Same bits as above */
#define PCI_ERR_CAP 24 /* Advanced Error Capabilities */
593,12 → 652,18
 
/* Virtual Channel */
#define PCI_VC_PORT_REG1 4
#define PCI_VC_REG1_EVCC 0x7 /* extended vc count */
#define PCI_VC_PORT_REG2 8
#define PCI_VC_REG2_32_PHASE 0x2
#define PCI_VC_REG2_64_PHASE 0x4
#define PCI_VC_REG2_128_PHASE 0x8
#define PCI_VC_PORT_CTRL 12
#define PCI_VC_PORT_STATUS 14
#define PCI_VC_RES_CAP 16
#define PCI_VC_RES_CTRL 20
#define PCI_VC_RES_STATUS 26
#define PCI_CAP_VC_BASE_SIZEOF 0x10
#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C
 
/* Power Budgeting */
#define PCI_PWR_DSR 4 /* Data Select Register */
611,7 → 676,14
#define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */
#define PCI_PWR_CAP 12 /* Capability */
#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */
#define PCI_EXT_CAP_PWR_SIZEOF 16
 
/* Vendor-Specific (VSEC, PCI_EXT_CAP_ID_VNDR) */
#define PCI_VNDR_HEADER 4 /* Vendor-Specific Header */
#define PCI_VNDR_HEADER_ID(x) ((x) & 0xffff)
#define PCI_VNDR_HEADER_REV(x) (((x) >> 16) & 0xf)
#define PCI_VNDR_HEADER_LEN(x) (((x) >> 20) & 0xfff)
 
/*
* Hypertransport sub capability types
*
643,6 → 715,8
#define HT_CAPTYPE_ERROR_RETRY 0xC0 /* Retry on error configuration */
#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 hypertransport configuration */
#define HT_CAPTYPE_PM 0xE0 /* Hypertransport powermanagement configuration */
#define HT_CAP_SIZEOF_LONG 28 /* slave & primary */
#define HT_CAP_SIZEOF_SHORT 24 /* host & secondary */
 
/* Alternative Routing-ID Interpretation */
#define PCI_ARI_CAP 0x04 /* ARI Capability Register */
653,6 → 727,7
#define PCI_ARI_CTRL_MFVC 0x0001 /* MFVC Function Groups Enable */
#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
#define PCI_EXT_CAP_ARI_SIZEOF 8
 
/* Address Translation Service */
#define PCI_ATS_CAP 0x04 /* ATS Capability Register */
662,26 → 737,29
#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */
#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */
#define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */
#define PCI_EXT_CAP_ATS_SIZEOF 8
 
/* Page Request Interface */
#define PCI_PRI_CAP 0x13 /* PRI capability ID */
#define PCI_PRI_CONTROL_OFF 0x04 /* Offset of control register */
#define PCI_PRI_STATUS_OFF 0x06 /* Offset of status register */
#define PCI_PRI_ENABLE 0x0001 /* Enable mask */
#define PCI_PRI_RESET 0x0002 /* Reset bit mask */
#define PCI_PRI_STATUS_RF 0x0001 /* Request Failure */
#define PCI_PRI_STATUS_UPRGI 0x0002 /* Unexpected PRG index */
#define PCI_PRI_STATUS_STOPPED 0x0100 /* PRI Stopped */
#define PCI_PRI_MAX_REQ_OFF 0x08 /* Cap offset for max reqs supported */
#define PCI_PRI_ALLOC_REQ_OFF 0x0c /* Cap offset for max reqs allowed */
#define PCI_PRI_CTRL 0x04 /* PRI control register */
#define PCI_PRI_CTRL_ENABLE 0x01 /* Enable */
#define PCI_PRI_CTRL_RESET 0x02 /* Reset */
#define PCI_PRI_STATUS 0x06 /* PRI status register */
#define PCI_PRI_STATUS_RF 0x001 /* Response Failure */
#define PCI_PRI_STATUS_UPRGI 0x002 /* Unexpected PRG index */
#define PCI_PRI_STATUS_STOPPED 0x100 /* PRI Stopped */
#define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */
#define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */
#define PCI_EXT_CAP_PRI_SIZEOF 16
 
/* PASID capability */
#define PCI_PASID_CAP 0x1b /* PASID capability ID */
#define PCI_PASID_CAP_OFF 0x04 /* PASID feature register */
#define PCI_PASID_CONTROL_OFF 0x06 /* PASID control register */
#define PCI_PASID_ENABLE 0x01 /* Enable/Supported bit */
#define PCI_PASID_EXEC 0x02 /* Exec permissions Enable/Supported */
#define PCI_PASID_PRIV 0x04 /* Priviledge Mode Enable/Support */
#define PCI_PASID_CAP 0x04 /* PASID feature register */
#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */
#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */
#define PCI_PASID_CTRL 0x06 /* PASID control register */
#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */
#define PCI_EXT_CAP_PASID_SIZEOF 8
 
/* Single Root I/O Virtualization */
#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
713,6 → 791,7
#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
#define PCI_EXT_CAP_SRIOV_SIZEOF 64
 
#define PCI_LTR_MAX_SNOOP_LAT 0x4
#define PCI_LTR_MAX_NOSNOOP_LAT 0x6
719,6 → 798,7
#define PCI_LTR_VALUE_MASK 0x000003ff
#define PCI_LTR_SCALE_MASK 0x00001c00
#define PCI_LTR_SCALE_SHIFT 10
#define PCI_EXT_CAP_LTR_SIZEOF 8
 
/* Access Control Service */
#define PCI_ACS_CAP 0x04 /* ACS Capability Register */
729,7 → 809,38
#define PCI_ACS_UF 0x10 /* Upstream Forwarding */
#define PCI_ACS_EC 0x20 /* P2P Egress Control */
#define PCI_ACS_DT 0x40 /* Direct Translated P2P */
#define PCI_ACS_EGRESS_BITS 0x05 /* ACS Egress Control Vector Size */
#define PCI_ACS_CTRL 0x06 /* ACS Control Register */
#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
 
#define PCI_VSEC_HDR 4 /* extended cap - vendor specific */
#define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */
 
/* sata capability */
#define PCI_SATA_REGS 4 /* SATA REGs specifier */
#define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */
#define PCI_SATA_REGS_INLINE 0xF /* REGS in config space */
#define PCI_SATA_SIZEOF_SHORT 8
#define PCI_SATA_SIZEOF_LONG 16
 
/* resizable BARs */
#define PCI_REBAR_CTRL 8 /* control register */
#define PCI_REBAR_CTRL_NBAR_MASK (7 << 5) /* mask for # bars */
#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # bars */
 
/* dynamic power allocation */
#define PCI_DPA_CAP 4 /* capability register */
#define PCI_DPA_CAP_SUBSTATE_MASK 0x1F /* # substates - 1 */
#define PCI_DPA_BASE_SIZEOF 16 /* size with 0 substates */
 
/* TPH Requester */
#define PCI_TPH_CAP 4 /* capability register */
#define PCI_TPH_CAP_LOC_MASK 0x600 /* location mask */
#define PCI_TPH_LOC_NONE 0x000 /* no location */
#define PCI_TPH_LOC_CAP 0x200 /* in capability */
#define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */
#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* st table mask */
#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */
#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */
 
#endif /* LINUX_PCI_REGS_H */
/drivers/include/linux/poison.h
40,12 → 40,6
#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */
 
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define MEMBLOCK_INACTIVE 0x3a84fb0144c9e71bULL
#else
#define MEMBLOCK_INACTIVE 0x44c9e71bUL
#endif
 
#define SLUB_RED_INACTIVE 0xbb
#define SLUB_RED_ACTIVE 0xcc
 
/drivers/include/linux/spinlock.h
344,4 → 344,10
# include <linux/spinlock_api_up.h>
#endif
 
struct rw_semaphore {
signed long count;
spinlock_t wait_lock;
struct list_head wait_list;
};
 
#endif /* __LINUX_SPINLOCK_H */
/drivers/include/linux/spinlock_api_up.h
31,7 → 31,7
do { local_bh_disable(); __LOCK(lock); } while (0)
 
#define __LOCK_IRQ(lock) \
do { local_irq_disable(); __LOCK(lock); } while (0)
do { asm volatile ("cli \n"); __LOCK(lock); } while (0)
 
#define __LOCK_IRQSAVE(lock, flags) \
do { \
51,7 → 51,7
__release(lock); (void)(lock); } while (0)
 
#define __UNLOCK_IRQ(lock) \
do { local_irq_enable(); __UNLOCK(lock); } while (0)
do { asm volatile ("sti \n"); __UNLOCK(lock); } while (0)
 
#define __UNLOCK_IRQRESTORE(lock, flags) \
do { \
/drivers/include/linux/types.h
24,7 → 24,8
typedef __kernel_dev_t dev_t;
typedef __kernel_ino_t ino_t;
typedef __kernel_mode_t mode_t;
typedef __kernel_nlink_t nlink_t;
typedef unsigned short umode_t;
typedef __u32 nlink_t;
typedef __kernel_off_t off_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_daddr_t daddr_t;
252,9 → 253,7
typedef unsigned int addr_t;
typedef unsigned int count_t;
 
# define WARN(condition, format...)
 
 
#define false 0
#define true 1
 
267,14 → 266,6
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 
 
 
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
 
#define DRM_INFO(fmt, arg...) dbgprintf("DRM: "fmt , ##arg)
 
#define DRM_ERROR(fmt, arg...) \
printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)
 
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
 
 
345,24 → 336,7
#define PAGE_MASK (~(PAGE_SIZE-1))
 
 
#define do_div(n, base) \
({ \
unsigned long __upper, __low, __high, __mod, __base; \
__base = (base); \
asm("":"=a" (__low), "=d" (__high) : "A" (n)); \
__upper = __high; \
if (__high) { \
__upper = __high % (__base); \
__high = __high / (__base); \
} \
asm("divl %2":"=a" (__low), "=d" (__mod) \
: "rm" (__base), "0" (__low), "1" (__upper)); \
asm("":"=A" (n) : "a" (__low), "d" (__high)); \
__mod; \
})
 
 
 
#define ENTER() dbgprintf("enter %s\n",__FUNCTION__)
#define LEAVE() dbgprintf("leave %s\n",__FUNCTION__)
 
375,4 → 349,9
 
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159
 
#ifndef __read_mostly
#define __read_mostly
#endif
 
 
#endif /* _LINUX_TYPES_H */
/drivers/include/linux/wait.h
36,6 → 36,40
} while (0)
 
 
 
 
#define wait_event_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
do{ \
wait_queue_t __wait = { \
.task_list = LIST_HEAD_INIT(__wait.task_list), \
.evnt = CreateEvent(NULL, MANUAL_DESTROY), \
}; \
u32 flags; \
\
spin_lock_irqsave(&wq.lock, flags); \
if (list_empty(&__wait.task_list)) \
__add_wait_queue(&wq, &__wait); \
spin_unlock_irqrestore(&wq.lock, flags); \
\
for(;;){ \
if (condition) \
break; \
WaitEvent(__wait.evnt); \
}; \
if (!list_empty_careful(&__wait.task_list)) { \
spin_lock_irqsave(&wq.lock, flags); \
list_del_init(&__wait.task_list); \
spin_unlock_irqrestore(&wq.lock, flags); \
}; \
DestroyEvent(__wait.evnt); \
} while (0); \
__ret; \
})
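A hedged usage sketch, not part of this revision: how a driver would typically call the macro above. All names below (struct my_hw, my_wait_for_irq) are made up for illustration. Note that in this port the macro always returns the caller-supplied timeout unchanged, because WaitEvent() blocks until the event is raised rather than until a deadline.

struct my_hw {                              /* hypothetical device state */
    wait_queue_head_t irq_queue;
    volatile int      irq_done;
};

static int my_wait_for_irq(struct my_hw *hw)
{
    long left;

    /* the IRQ handler is expected to set hw->irq_done and wake irq_queue */
    left = wait_event_timeout(hw->irq_queue, hw->irq_done != 0,
                              msecs_to_jiffies(1000));
    return left ? 0 : -ETIMEDOUT;
}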
 
 
 
#define wait_event(wq, condition) \
do{ \
wait_queue_t __wait = { \
63,6 → 97,8
} while (0)
 
 
 
 
static inline
void wake_up_all(wait_queue_head_t *q)
{
127,10 → 163,13
struct work_struct work;
};
 
 
struct workqueue_struct *alloc_workqueue_key(const char *fmt,
unsigned int flags, int max_active);
 
 
#define alloc_ordered_workqueue(fmt, flags, args...) \
alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
 
int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
 
140,5 → 179,12
(_work)->work.func = _func; \
} while (0)
 
 
struct completion {
unsigned int done;
wait_queue_head_t wait;
};
 
 
#endif
 
/drivers/include/syscall.h
39,6 → 39,7
addr_t STDCALL AllocPages(count_t count)__asm__("AllocPages");
void IMPORT __attribute__((regparm(1)))
FreePage(addr_t page)__asm__("FreePage");
void STDCALL MapPage(void *vaddr, addr_t paddr, u32_t flags)__asm__("MapPage");
 
 
void* STDCALL CreateRingBuffer(size_t size, u32_t map)__asm__("CreateRingBuffer");
91,6 → 92,48
#define pciWriteLong(tag, reg, val) \
PciWrite32(PCI_BUS_FROM_TAG(tag),PCI_DFN_FROM_TAG(tag),(reg),(val))
 
static inline int pci_read_config_byte(struct pci_dev *dev, int where,
u8 *val)
{
*val = PciRead8(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_read_config_word(struct pci_dev *dev, int where,
u16 *val)
{
*val = PciRead16(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
u32 *val)
{
*val = PciRead32(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_write_config_byte(struct pci_dev *dev, int where,
u8 val)
{
PciWrite8(dev->busnr, dev->devfn, where, val);
return 1;
}
 
static inline int pci_write_config_word(struct pci_dev *dev, int where,
u16 val)
{
PciWrite16(dev->busnr, dev->devfn, where, val);
return 1;
}
 
static inline int pci_write_config_dword(struct pci_dev *dev, int where,
u32 val)
{
PciWrite32(dev->busnr, dev->devfn, where, val);
return 1;
}
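Illustrative only, not part of this revision: enabling bus mastering through the wrappers above. PCI_COMMAND and PCI_COMMAND_MASTER are the standard config-space defines from pci_regs.h. Note that these wrappers always return 1, so their return value is not a Linux-style error code and should not be tested as one.

/* Illustrative only -- not part of this patch. */
static void my_enable_bus_master(struct pci_dev *pdev)
{
    u16 cmd = 0;

    pci_read_config_word(pdev, PCI_COMMAND, &cmd);
    if (!(cmd & PCI_COMMAND_MASTER)) {
        cmd |= PCI_COMMAND_MASTER;
        pci_write_config_word(pdev, PCI_COMMAND, cmd);
    }
}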
 
///////////////////////////////////////////////////////////////////////////////
 
int dbg_open(char *path);
447,4 → 490,37
 
#define rmb() asm volatile("lfence":::"memory")
 
static inline void *vzalloc(unsigned long size)
{
void *mem;
 
mem = KernelAlloc(size);
if(mem)
memset(mem, 0, size);
 
return mem;
};
 
static inline void vfree(void *addr)
{
KernelFree(addr);
}
 
static inline int power_supply_is_system_supplied(void) { return -1; }
 
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
 
static inline void init_rwsem(struct rw_semaphore *sem)
{
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
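The bias constants above encode ownership in sem->count: each active reader adds RWSEM_ACTIVE_READ_BIAS, a writer adds RWSEM_ACTIVE_WRITE_BIAS (which drives the count negative). The port currently only provides init_rwsem(); the following is a hedged, uniprocessor-only sketch of how a trylock pair could use those constants, not an implementation from this patch.

/* Hedged sketch -- not part of this patch; contention handling omitted. */
static inline int my_down_read_trylock(struct rw_semaphore *sem)
{
    int got = 0;

    spin_lock(&sem->wait_lock);
    if (sem->count >= 0 && list_empty(&sem->wait_list)) {
        sem->count += RWSEM_ACTIVE_READ_BIAS;   /* one more active reader */
        got = 1;
    }
    spin_unlock(&sem->wait_lock);
    return got;
}

static inline void my_up_read(struct rw_semaphore *sem)
{
    spin_lock(&sem->wait_lock);
    sem->count -= RWSEM_ACTIVE_READ_BIAS;
    spin_unlock(&sem->wait_lock);
}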
 
#endif
/drivers/video/drm/drm_crtc.c
31,16 → 31,12
*/
#include <linux/list.h>
#include <linux/slab.h>
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_edid.h"
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
 
struct drm_prop_enum_list {
int type;
char *name;
};
 
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
char *fnname(int val) \
162,6 → 158,7
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
};
 
static struct drm_prop_enum_list drm_encoder_enum_list[] =
170,6 → 167,7
{ DRM_MODE_ENCODER_TMDS, "TMDS" },
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
{ DRM_MODE_ENCODER_TVDAC, "TV" },
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
};
 
char *drm_get_encoder_name(struct drm_encoder *encoder)
228,7 → 226,7
again:
if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Ran out memory getting a mode number\n");
return -EINVAL;
return -ENOMEM;
}
 
mutex_lock(&dev->mode_config.idr_mutex);
236,6 → 234,8
mutex_unlock(&dev->mode_config.idr_mutex);
if (ret == -EAGAIN)
goto again;
else if (ret)
return ret;
 
obj->id = new_id;
obj->type = obj_type;
294,9 → 294,8
int ret;
 
ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
if (ret) {
if (ret)
return ret;
}
 
fb->dev = dev;
fb->funcs = funcs;
320,23 → 319,13
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
struct drm_mode_set set;
int ret;
 
/* remove from any CRTC */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb == fb) {
/* should turn off the crtc */
memset(&set, 0, sizeof(struct drm_mode_set));
set.crtc = crtc;
set.fb = NULL;
ret = crtc->funcs->set_config(&set);
if (ret)
DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
}
}
 
/*
* This could be moved to drm_framebuffer_remove(), but for
* debugging it is nice to keep around the list of fbs that are
* no longer associated with a drm_file but are not yet
* unreferenced. (i915 and omapdrm have debugfs files which will
* show this.)
*/
drm_mode_object_put(dev, &fb->base);
list_del(&fb->head);
dev->mode_config.num_fb--;
343,6 → 332,10
}
EXPORT_SYMBOL(drm_framebuffer_cleanup);
 
 
 
 
 
/**
* drm_crtc_init - Initialise a new CRTC object
* @dev: DRM device
350,22 → 343,37
* @funcs: callbacks for the new CRTC
*
* LOCKING:
* Caller must hold mode config lock.
* Takes mode_config lock.
*
* Inits a new object created as the base part of a driver crtc object.
*
* RETURNS:
* Zero on success, error code on failure.
*/
void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs)
{
int ret;
 
crtc->dev = dev;
crtc->funcs = funcs;
crtc->invert_dimensions = false;
 
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
 
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
goto out;
 
crtc->base.properties = &crtc->properties;
 
list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
dev->mode_config.num_crtc++;
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
}
EXPORT_SYMBOL(drm_crtc_init);
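drm_crtc_init() now returns an error code instead of void, so callers are expected to propagate failures. A hedged driver-side sketch follows; my_crtc_create and my_crtc_funcs are made-up names, not part of this patch.

/* Illustrative only -- not part of this patch. */
static int my_crtc_create(struct drm_device *dev)
{
    struct drm_crtc *crtc;
    int ret;

    crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
    if (!crtc)
        return -ENOMEM;

    ret = drm_crtc_init(dev, crtc, &my_crtc_funcs);
    if (ret) {
        kfree(crtc);            /* object was never registered */
        return ret;
    }
    return 0;
}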
 
425,7 → 433,7
struct drm_display_mode *mode)
{
list_del(&mode->head);
kfree(mode);
drm_mode_destroy(connector->dev, mode);
}
EXPORT_SYMBOL(drm_mode_remove);
 
437,21 → 445,30
* @name: user visible name of the connector
*
* LOCKING:
* Caller must hold @dev's mode_config lock.
* Takes mode config lock.
*
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
*
* RETURNS:
* Zero on success, error code on failure.
*/
void drm_connector_init(struct drm_device *dev,
int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type)
{
int ret;
 
mutex_lock(&dev->mode_config.mutex);
 
ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
if (ret)
goto out;
 
connector->base.properties = &connector->properties;
connector->dev = dev;
connector->funcs = funcs;
drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
connector->connector_type = connector_type;
connector->connector_type_id =
++drm_connector_enum_list[connector_type].count; /* TODO */
463,13 → 480,18
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
 
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
drm_connector_attach_property(connector,
dev->mode_config.edid_property, 0);
dev->mode_config.edid_property,
0);
 
drm_connector_attach_property(connector,
dev->mode_config.dpms_property, 0);
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
}
EXPORT_SYMBOL(drm_connector_init);
 
478,7 → 500,7
* @connector: connector to cleanup
*
* LOCKING:
* Caller must hold @dev's mode_config lock.
* Takes mode config lock.
*
* Cleans up the connector but doesn't free the object.
*/
504,16 → 526,31
}
EXPORT_SYMBOL(drm_connector_cleanup);
 
void drm_encoder_init(struct drm_device *dev,
void drm_connector_unplug_all(struct drm_device *dev)
{
struct drm_connector *connector;
 
/* taking the mode config mutex ends up in a clash with sysfs */
// list_for_each_entry(connector, &dev->mode_config.connector_list, head)
// drm_sysfs_connector_remove(connector);
 
}
EXPORT_SYMBOL(drm_connector_unplug_all);
 
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type)
{
int ret;
 
mutex_lock(&dev->mode_config.mutex);
 
ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
if (ret)
goto out;
 
encoder->dev = dev;
 
drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
 
520,7 → 557,10
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
dev->mode_config.num_encoder++;
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
}
EXPORT_SYMBOL(drm_encoder_init);
 
535,6 → 575,70
}
EXPORT_SYMBOL(drm_encoder_cleanup);
 
int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, uint32_t format_count,
bool priv)
{
int ret;
 
mutex_lock(&dev->mode_config.mutex);
 
ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
goto out;
 
plane->base.properties = &plane->properties;
plane->dev = dev;
plane->funcs = funcs;
plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
GFP_KERNEL);
if (!plane->format_types) {
DRM_DEBUG_KMS("out of memory when allocating plane\n");
drm_mode_object_put(dev, &plane->base);
ret = -ENOMEM;
goto out;
}
 
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
plane->format_count = format_count;
plane->possible_crtcs = possible_crtcs;
 
/* Private planes are not exposed to userspace, but depending on the
* display hardware it might be convenient to share the scanout-engine
* programming with the crtc implementation.
*/
if (!priv) {
list_add_tail(&plane->head, &dev->mode_config.plane_list);
dev->mode_config.num_plane++;
} else {
INIT_LIST_HEAD(&plane->head);
}
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
}
EXPORT_SYMBOL(drm_plane_init);
 
void drm_plane_cleanup(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
 
mutex_lock(&dev->mode_config.mutex);
kfree(plane->format_types);
drm_mode_object_put(dev, &plane->base);
/* if not added to a list, it must be a private plane */
if (!list_empty(&plane->head)) {
list_del(&plane->head);
dev->mode_config.num_plane--;
}
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_plane_cleanup);
 
/**
* drm_mode_create - create a new display mode
* @dev: DRM device
555,7 → 659,11
if (!nmode)
return NULL;
 
drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
kfree(nmode);
return NULL;
}
 
return nmode;
}
EXPORT_SYMBOL(drm_mode_create);
572,6 → 680,9
*/
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
{
if (!mode)
return;
 
drm_mode_object_put(dev, &mode->base);
 
kfree(mode);
582,7 → 693,6
{
struct drm_property *edid;
struct drm_property *dpms;
int i;
 
/*
* Standard properties (apply to all connectors)
592,11 → 702,9
"EDID", 0);
dev->mode_config.edid_property = edid;
 
dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
"DPMS", ARRAY_SIZE(drm_dpms_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
drm_dpms_enum_list[i].name);
dpms = drm_property_create_enum(dev, 0,
"DPMS", drm_dpms_enum_list,
ARRAY_SIZE(drm_dpms_enum_list));
dev->mode_config.dpms_property = dpms;
 
return 0;
612,30 → 720,21
{
struct drm_property *dvi_i_selector;
struct drm_property *dvi_i_subconnector;
int i;
 
if (dev->mode_config.dvi_i_select_subconnector_property)
return 0;
 
dvi_i_selector =
drm_property_create(dev, DRM_MODE_PROP_ENUM,
drm_property_create_enum(dev, 0,
"select subconnector",
drm_dvi_i_select_enum_list,
ARRAY_SIZE(drm_dvi_i_select_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
drm_property_add_enum(dvi_i_selector, i,
drm_dvi_i_select_enum_list[i].type,
drm_dvi_i_select_enum_list[i].name);
dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
 
dvi_i_subconnector =
drm_property_create(dev, DRM_MODE_PROP_ENUM |
DRM_MODE_PROP_IMMUTABLE,
dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"subconnector",
drm_dvi_i_subconnector_enum_list,
ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
drm_property_add_enum(dvi_i_subconnector, i,
drm_dvi_i_subconnector_enum_list[i].type,
drm_dvi_i_subconnector_enum_list[i].name);
dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
 
return 0;
666,23 → 765,17
/*
* Basic connector properties
*/
tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
tv_selector = drm_property_create_enum(dev, 0,
"select subconnector",
drm_tv_select_enum_list,
ARRAY_SIZE(drm_tv_select_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
drm_property_add_enum(tv_selector, i,
drm_tv_select_enum_list[i].type,
drm_tv_select_enum_list[i].name);
dev->mode_config.tv_select_subconnector_property = tv_selector;
 
tv_subconnector =
drm_property_create(dev, DRM_MODE_PROP_ENUM |
DRM_MODE_PROP_IMMUTABLE, "subconnector",
drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"subconnector",
drm_tv_subconnector_enum_list,
ARRAY_SIZE(drm_tv_subconnector_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
drm_property_add_enum(tv_subconnector, i,
drm_tv_subconnector_enum_list[i].type,
drm_tv_subconnector_enum_list[i].name);
dev->mode_config.tv_subconnector_property = tv_subconnector;
 
/*
689,28 → 782,16
* Other, TV specific properties: margins & TV modes.
*/
dev->mode_config.tv_left_margin_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"left margin", 2);
dev->mode_config.tv_left_margin_property->values[0] = 0;
dev->mode_config.tv_left_margin_property->values[1] = 100;
drm_property_create_range(dev, 0, "left margin", 0, 100);
 
dev->mode_config.tv_right_margin_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"right margin", 2);
dev->mode_config.tv_right_margin_property->values[0] = 0;
dev->mode_config.tv_right_margin_property->values[1] = 100;
drm_property_create_range(dev, 0, "right margin", 0, 100);
 
dev->mode_config.tv_top_margin_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"top margin", 2);
dev->mode_config.tv_top_margin_property->values[0] = 0;
dev->mode_config.tv_top_margin_property->values[1] = 100;
drm_property_create_range(dev, 0, "top margin", 0, 100);
 
dev->mode_config.tv_bottom_margin_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"bottom margin", 2);
dev->mode_config.tv_bottom_margin_property->values[0] = 0;
dev->mode_config.tv_bottom_margin_property->values[1] = 100;
drm_property_create_range(dev, 0, "bottom margin", 0, 100);
 
dev->mode_config.tv_mode_property =
drm_property_create(dev, DRM_MODE_PROP_ENUM,
720,40 → 801,22
i, modes[i]);
 
dev->mode_config.tv_brightness_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"brightness", 2);
dev->mode_config.tv_brightness_property->values[0] = 0;
dev->mode_config.tv_brightness_property->values[1] = 100;
drm_property_create_range(dev, 0, "brightness", 0, 100);
 
dev->mode_config.tv_contrast_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"contrast", 2);
dev->mode_config.tv_contrast_property->values[0] = 0;
dev->mode_config.tv_contrast_property->values[1] = 100;
drm_property_create_range(dev, 0, "contrast", 0, 100);
 
dev->mode_config.tv_flicker_reduction_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"flicker reduction", 2);
dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
 
dev->mode_config.tv_overscan_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"overscan", 2);
dev->mode_config.tv_overscan_property->values[0] = 0;
dev->mode_config.tv_overscan_property->values[1] = 100;
drm_property_create_range(dev, 0, "overscan", 0, 100);
 
dev->mode_config.tv_saturation_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"saturation", 2);
dev->mode_config.tv_saturation_property->values[0] = 0;
dev->mode_config.tv_saturation_property->values[1] = 100;
drm_property_create_range(dev, 0, "saturation", 0, 100);
 
dev->mode_config.tv_hue_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"hue", 2);
dev->mode_config.tv_hue_property->values[0] = 0;
dev->mode_config.tv_hue_property->values[1] = 100;
drm_property_create_range(dev, 0, "hue", 0, 100);
 
return 0;
}
769,18 → 832,14
int drm_mode_create_scaling_mode_property(struct drm_device *dev)
{
struct drm_property *scaling_mode;
int i;
 
if (dev->mode_config.scaling_mode_property)
return 0;
 
scaling_mode =
drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
drm_property_create_enum(dev, 0, "scaling mode",
drm_scaling_mode_enum_list,
ARRAY_SIZE(drm_scaling_mode_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
drm_property_add_enum(scaling_mode, i,
drm_scaling_mode_enum_list[i].type,
drm_scaling_mode_enum_list[i].name);
 
dev->mode_config.scaling_mode_property = scaling_mode;
 
798,18 → 857,14
int drm_mode_create_dithering_property(struct drm_device *dev)
{
struct drm_property *dithering_mode;
int i;
 
if (dev->mode_config.dithering_mode_property)
return 0;
 
dithering_mode =
drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
drm_property_create_enum(dev, 0, "dithering",
drm_dithering_mode_enum_list,
ARRAY_SIZE(drm_dithering_mode_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
drm_property_add_enum(dithering_mode, i,
drm_dithering_mode_enum_list[i].type,
drm_dithering_mode_enum_list[i].name);
dev->mode_config.dithering_mode_property = dithering_mode;
 
return 0;
826,20 → 881,15
int drm_mode_create_dirty_info_property(struct drm_device *dev)
{
struct drm_property *dirty_info;
int i;
 
if (dev->mode_config.dirty_info_property)
return 0;
 
dirty_info =
drm_property_create(dev, DRM_MODE_PROP_ENUM |
DRM_MODE_PROP_IMMUTABLE,
drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"dirty",
drm_dirty_info_enum_list,
ARRAY_SIZE(drm_dirty_info_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
drm_property_add_enum(dirty_info, i,
drm_dirty_info_enum_list[i].type,
drm_dirty_info_enum_list[i].name);
dev->mode_config.dirty_info_property = dirty_info;
 
return 0;
866,6 → 916,7
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
 
mutex_lock(&dev->mode_config.mutex);
922,6 → 973,7
 
return 0;
}
EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
 
/**
* drm_mode_config_cleanup - free up DRM mode_config info
942,6 → 994,7
struct drm_encoder *encoder, *enct;
struct drm_framebuffer *fb, *fbt;
struct drm_property *property, *pt;
struct drm_plane *plane, *plt;
 
list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
head) {
958,8 → 1011,9
drm_property_destroy(dev, property);
}
 
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
fb->funcs->destroy(fb);
list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
head) {
plane->funcs->destroy(plane);
}
 
list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
966,6 → 1020,8
crtc->funcs->destroy(crtc);
}
 
idr_remove_all(&dev->mode_config.crtc_idr);
idr_destroy(&dev->mode_config.crtc_idr);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
 
980,9 → 1036,16
* Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
* the user.
*/
void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
struct drm_display_mode *in)
static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
const struct drm_display_mode *in)
{
WARN(in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX,
"timing values too large for mode info\n");
 
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
1011,10 → 1074,16
*
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
*
* RETURNS:
* Zero on success, errno on failure.
*/
void drm_crtc_convert_umode(struct drm_display_mode *out,
struct drm_mode_modeinfo *in)
static int drm_crtc_convert_umode(struct drm_display_mode *out,
const struct drm_mode_modeinfo *in)
{
if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
return -ERANGE;
 
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
1031,6 → 1100,8
out->type = in->type;
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 
return 0;
}
 
 
1231,7 → 1302,7
* @arg: arg from ioctl
*
* LOCKING:
* Caller? (FIXME)
* Takes mode config lock.
*
* Construct a CRTC configuration structure to return to the user.
*
1291,7 → 1362,7
* @arg: arg from ioctl
*
* LOCKING:
* Caller? (FIXME)
* Takes mode config lock.
*
* Construct a connector configuration structure to return to the user.
*
1336,11 → 1407,7
}
connector = obj_to_connector(obj);
 
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] != 0) {
props_count++;
}
}
props_count = connector->properties.count;
 
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
1376,7 → 1443,7
*/
if ((out_resp->count_modes >= mode_count) && mode_count) {
copied = 0;
mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
list_for_each_entry(mode, &connector->modes, head) {
drm_crtc_convert_to_umode(&u_mode, mode);
if (copy_to_user(mode_ptr + copied,
1391,17 → 1458,16
 
if ((out_resp->count_props >= props_count) && props_count) {
copied = 0;
prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] != 0) {
if (put_user(connector->property_ids[i],
prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
for (i = 0; i < connector->properties.count; i++) {
if (put_user(connector->properties.ids[i],
prop_ptr + copied)) {
ret = -EFAULT;
goto out;
}
 
if (put_user(connector->property_values[i],
if (put_user(connector->properties.values[i],
prop_values + copied)) {
ret = -EFAULT;
goto out;
1409,12 → 1475,11
copied++;
}
}
}
out_resp->count_props = props_count;
 
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
if (put_user(connector->encoder_ids[i],
1468,6 → 1533,254
}
 
/**
* drm_mode_getplane_res - get plane info
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* LOCKING:
* Takes mode config lock.
*
* Return a plane count and a set of IDs.
*/
int drm_mode_getplane_res(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_plane_res *plane_resp = data;
struct drm_mode_config *config;
struct drm_plane *plane;
uint32_t __user *plane_ptr;
int copied = 0, ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
config = &dev->mode_config;
 
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the second time to fill it.
*/
if (config->num_plane &&
(plane_resp->count_planes >= config->num_plane)) {
plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
 
list_for_each_entry(plane, &config->plane_list, head) {
if (put_user(plane->base.id, plane_ptr + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
plane_resp->count_planes = config->num_plane;
 
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
 
/**
* drm_mode_getplane - get plane info
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* LOCKING:
* Takes mode config lock.
*
* Return plane info, including formats supported, gamma size, any
* current fb, etc.
*/
int drm_mode_getplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_plane *plane_resp = data;
struct drm_mode_object *obj;
struct drm_plane *plane;
uint32_t __user *format_ptr;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, plane_resp->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
ret = -ENOENT;
goto out;
}
plane = obj_to_plane(obj);
 
if (plane->crtc)
plane_resp->crtc_id = plane->crtc->base.id;
else
plane_resp->crtc_id = 0;
 
if (plane->fb)
plane_resp->fb_id = plane->fb->base.id;
else
plane_resp->fb_id = 0;
 
plane_resp->plane_id = plane->base.id;
plane_resp->possible_crtcs = plane->possible_crtcs;
plane_resp->gamma_size = plane->gamma_size;
 
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the second time to fill it.
*/
if (plane->format_count &&
(plane_resp->count_format_types >= plane->format_count)) {
format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
if (copy_to_user(format_ptr,
plane->format_types,
sizeof(uint32_t) * plane->format_count)) {
ret = -EFAULT;
goto out;
}
}
plane_resp->count_format_types = plane->format_count;
 
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
 
/**
* drm_mode_setplane - set up or tear down a plane
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* LOCKING:
* Takes mode config lock.
*
* Set plane info, including placement, fb, scaling, and other factors.
* Or pass a NULL fb to disable.
*/
int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_set_plane *plane_req = data;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
int ret = 0;
unsigned int fb_width, fb_height;
int i;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
/*
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
obj = drm_mode_object_find(dev, plane_req->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
ret = -ENOENT;
goto out;
}
plane = obj_to_plane(obj);
 
/* No fb means shut it down */
if (!plane_req->fb_id) {
plane->funcs->disable_plane(plane);
plane->crtc = NULL;
plane->fb = NULL;
goto out;
}
 
obj = drm_mode_object_find(dev, plane_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
 
obj = drm_mode_object_find(dev, plane_req->fb_id,
DRM_MODE_OBJECT_FB);
if (!obj) {
DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
plane_req->fb_id);
ret = -ENOENT;
goto out;
}
fb = obj_to_fb(obj);
 
/* Check whether this plane supports the fb pixel format. */
for (i = 0; i < plane->format_count; i++)
if (fb->pixel_format == plane->format_types[i])
break;
if (i == plane->format_count) {
DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
ret = -EINVAL;
goto out;
}
 
fb_width = fb->width << 16;
fb_height = fb->height << 16;
 
/* Make sure source coordinates are inside the fb. */
if (plane_req->src_w > fb_width ||
plane_req->src_x > fb_width - plane_req->src_w ||
plane_req->src_h > fb_height ||
plane_req->src_y > fb_height - plane_req->src_h) {
DRM_DEBUG_KMS("Invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
plane_req->src_w >> 16,
((plane_req->src_w & 0xffff) * 15625) >> 10,
plane_req->src_h >> 16,
((plane_req->src_h & 0xffff) * 15625) >> 10,
plane_req->src_x >> 16,
((plane_req->src_x & 0xffff) * 15625) >> 10,
plane_req->src_y >> 16,
((plane_req->src_y & 0xffff) * 15625) >> 10);
ret = -ENOSPC;
goto out;
}
 
/* Give drivers some help against integer overflows */
if (plane_req->crtc_w > INT_MAX ||
plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
plane_req->crtc_h > INT_MAX ||
plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
plane_req->crtc_w, plane_req->crtc_h,
plane_req->crtc_x, plane_req->crtc_y);
ret = -ERANGE;
goto out;
}
 
ret = plane->funcs->update_plane(plane, crtc, fb,
plane_req->crtc_x, plane_req->crtc_y,
plane_req->crtc_w, plane_req->crtc_h,
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
if (!ret) {
plane->crtc = crtc;
plane->fb = fb;
}
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
}
 
/**
* drm_mode_setcrtc - set CRTC configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
1475,7 → 1788,7
* @arg: arg from ioctl
*
* LOCKING:
* Caller? (FIXME)
* Takes mode config lock.
*
* Build a new CRTC configuration based on user request.
*
1490,18 → 1803,22
struct drm_mode_config *config = &dev->mode_config;
struct drm_mode_crtc *crtc_req = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc, *crtcfb;
struct drm_crtc *crtc;
struct drm_connector **connector_set = NULL, *connector;
struct drm_framebuffer *fb = NULL;
struct drm_display_mode *mode = NULL;
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
int ret = 0;
int ret;
int i;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
/* For some reason crtc x/y offsets are signed internally. */
if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
return -ERANGE;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
1514,17 → 1831,16
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
if (crtc_req->mode_valid) {
int hdisplay, vdisplay;
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
list_for_each_entry(crtcfb,
&dev->mode_config.crtc_list, head) {
if (crtcfb == crtc) {
DRM_DEBUG_KMS("Using current fb for "
"setmode\n");
if (!crtc->fb) {
DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
ret = -EINVAL;
goto out;
}
fb = crtc->fb;
}
}
} else {
obj = drm_mode_object_find(dev, crtc_req->fb_id,
DRM_MODE_OBJECT_FB);
1538,8 → 1854,36
}
 
mode = drm_mode_create(dev);
drm_crtc_convert_umode(mode, &crtc_req->mode);
if (!mode) {
ret = -ENOMEM;
goto out;
}
 
ret = drm_crtc_convert_umode(mode, &crtc_req->mode);
if (ret) {
DRM_DEBUG_KMS("Invalid mode\n");
goto out;
}
 
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
hdisplay = mode->hdisplay;
vdisplay = mode->vdisplay;
 
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
 
if (hdisplay > fb->width ||
vdisplay > fb->height ||
crtc_req->x > fb->width - hdisplay ||
crtc_req->y > fb->height - vdisplay) {
DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
fb->width, fb->height,
hdisplay, vdisplay, crtc_req->x, crtc_req->y,
crtc->invert_dimensions ? " (inverted)" : "");
ret = -ENOSPC;
goto out;
}
}
 
if (crtc_req->count_connectors == 0 && mode) {
1573,7 → 1917,7
}
 
for (i = 0; i < crtc_req->count_connectors; i++) {
set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
if (get_user(out_id, &set_connectors_ptr[i])) {
ret = -EFAULT;
goto out;
1607,6 → 1951,7
 
out:
kfree(connector_set);
drm_mode_destroy(dev, mode);
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
1622,10 → 1967,8
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
if (!req->flags) {
DRM_ERROR("no operation set\n");
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
}
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
1638,7 → 1981,6
 
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set) {
DRM_ERROR("crtc does not support cursor\n");
ret = -ENXIO;
goto out;
}
1651,7 → 1993,6
if (crtc->funcs->cursor_move) {
ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
} else {
DRM_ERROR("crtc does not support cursor\n");
ret = -EFAULT;
goto out;
}
1660,7 → 2001,43
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
#endif
/* Original addfb only supported RGB formats, so figure out which one */
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
{
uint32_t fmt;
 
switch (bpp) {
case 8:
fmt = DRM_FORMAT_RGB332;
break;
case 16:
if (depth == 15)
fmt = DRM_FORMAT_XRGB1555;
else
fmt = DRM_FORMAT_RGB565;
break;
case 24:
fmt = DRM_FORMAT_RGB888;
break;
case 32:
if (depth == 24)
fmt = DRM_FORMAT_XRGB8888;
else if (depth == 30)
fmt = DRM_FORMAT_XRGB2101010;
else
fmt = DRM_FORMAT_ARGB8888;
break;
default:
DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
fmt = DRM_FORMAT_XRGB8888;
break;
}
 
return fmt;
}
EXPORT_SYMBOL(drm_mode_legacy_fb_format);
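For reference, a hedged sketch (not part of this patch) of what the helper above returns for a few common legacy bpp/depth pairs passed by old addfb callers.

/* Illustrative only -- not part of this patch. */
static void legacy_format_examples(void)
{
    uint32_t a = drm_mode_legacy_fb_format(32, 24); /* DRM_FORMAT_XRGB8888    */
    uint32_t b = drm_mode_legacy_fb_format(32, 30); /* DRM_FORMAT_XRGB2101010 */
    uint32_t c = drm_mode_legacy_fb_format(16, 16); /* DRM_FORMAT_RGB565      */
    (void)a; (void)b; (void)c;
}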
#if 0
/**
* drm_mode_addfb - add an FB to the graphics configuration
* @inode: inode from the ioctl
1681,31 → 2058,210
int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd *r = data;
struct drm_mode_fb_cmd *or = data;
struct drm_mode_fb_cmd2 r = {};
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret = 0;
 
/* Use new struct with format internally */
r.fb_id = or->fb_id;
r.width = or->width;
r.height = or->height;
r.pitches[0] = or->pitch;
r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
r.handles[0] = or->handle;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
if ((config->min_width > r.width) || (r.width > config->max_width))
return -EINVAL;
 
if ((config->min_height > r.height) || (r.height > config->max_height))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
/* TODO check buffer is sufficiently large */
/* TODO setup destructor callback */
 
fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
ret = PTR_ERR(fb);
goto out;
}
 
or->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
 
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
 
static int format_check(const struct drm_mode_fb_cmd2 *r)
{
uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
 
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB332:
case DRM_FORMAT_BGR233:
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_XBGR4444:
case DRM_FORMAT_RGBX4444:
case DRM_FORMAT_BGRX4444:
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_ABGR4444:
case DRM_FORMAT_RGBA4444:
case DRM_FORMAT_BGRA4444:
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_XBGR1555:
case DRM_FORMAT_RGBX5551:
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_RGBA5551:
case DRM_FORMAT_BGRA5551:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
case DRM_FORMAT_RGB888:
case DRM_FORMAT_BGR888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_RGBX1010102:
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_BGRA1010102:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_AYUV:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
return 0;
default:
return -EINVAL;
}
}
 
static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
{
int ret, hsub, vsub, num_planes, i;
 
ret = format_check(r);
if (ret) {
DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
return ret;
}
 
hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
num_planes = drm_format_num_planes(r->pixel_format);
 
if (r->width == 0 || r->width % hsub) {
DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
return -EINVAL;
}
 
if (r->height == 0 || r->height % vsub) {
DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
return -EINVAL;
}
 
for (i = 0; i < num_planes; i++) {
unsigned int width = r->width / (i != 0 ? hsub : 1);
 
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
return -EINVAL;
}
 
if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
}
 
return 0;
}
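A worked example, not part of this patch, of what framebuffer_check() above demands for a 1920x1080 NV12 buffer (two planes, 2x2 chroma subsampling), assuming the usual cpp values of 1 for the luma plane and 2 for the interleaved CbCr plane:

/* Illustrative only -- not part of this patch.
 *   plane 0: cpp 1, width 1920 -> pitches[0] >= 1920
 *   plane 1: cpp 2, width  960 -> pitches[1] >= 1920
 * Both width (1920) and height (1080) must be multiples of the
 * subsampling factors, and both handles[] entries must be non-zero. */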
 
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
* @cmd: cmd from ioctl
* @arg: arg from ioctl
*
* LOCKING:
* Takes mode config lock.
*
* Add a new FB to the specified CRTC, given a user request with format.
*
* Called by the user via ioctl.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_mode_addfb2(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd2 *r = data;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_ERROR("mode new framebuffer width not within limits\n");
DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
return -EINVAL;
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
DRM_ERROR("mode new framebuffer height not within limits\n");
DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
r->height, config->min_height, config->max_height);
return -EINVAL;
}
 
ret = framebuffer_check(r);
if (ret)
return ret;
 
mutex_lock(&dev->mode_config.mutex);
 
/* TODO check buffer is sufficiently large */
/* TODO setup destructor callback */
 
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
DRM_ERROR("could not create framebuffer\n");
DRM_DEBUG_KMS("could not create framebuffer\n");
ret = PTR_ERR(fb);
goto out;
}
1753,7 → 2309,6
obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
/* TODO check that we really get a framebuffer back. */
if (!obj) {
DRM_ERROR("mode invalid framebuffer id\n");
ret = -EINVAL;
goto out;
}
1764,17 → 2319,12
found = 1;
 
if (!found) {
DRM_ERROR("tried to remove a fb that we didn't own\n");
ret = -EINVAL;
goto out;
}
 
/* TODO release all crtc connected to the framebuffer */
/* TODO unhook the destructor from the buffer object */
drm_framebuffer_remove(fb);
 
list_del(&fb->filp_head);
fb->funcs->destroy(fb);
 
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
1788,7 → 2338,7
* @arg: arg from ioctl
*
* LOCKING:
* Caller? (FIXME)
* Takes mode config lock.
*
* Lookup the FB given its ID and return info about it.
*
1811,7 → 2361,6
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
DRM_ERROR("invalid framebuffer id\n");
ret = -EINVAL;
goto out;
}
1821,7 → 2370,7
r->width = fb->width;
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitch;
r->pitch = fb->pitches[0];
fb->funcs->create_handle(fb, file_priv, &r->handle);
 
out:
1839,7 → 2388,7
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
int ret = 0;
int ret;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
1847,7 → 2396,6
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
DRM_ERROR("invalid framebuffer id\n");
ret = -EINVAL;
goto out_err1;
}
1854,7 → 2402,7
fb = obj_to_fb(obj);
 
num_clips = r->num_clips;
clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
 
if (!num_clips != !clips_ptr) {
ret = -EINVAL;
1870,6 → 2418,10
}
 
if (num_clips && clips_ptr) {
if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
ret = -EINVAL;
goto out_err1;
}
clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
if (!clips) {
ret = -ENOMEM;
1921,8 → 2473,7
 
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
list_del(&fb->filp_head);
fb->funcs->destroy(fb);
drm_framebuffer_remove(fb);
}
mutex_unlock(&dev->mode_config.mutex);
}
1936,38 → 2487,48
*
* Add @mode to @connector's user mode list.
*/
static int drm_mode_attachmode(struct drm_device *dev,
static void drm_mode_attachmode(struct drm_device *dev,
struct drm_connector *connector,
struct drm_display_mode *mode)
{
int ret = 0;
 
list_add_tail(&mode->head, &connector->user_modes);
return ret;
}
 
int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
struct drm_connector *connector;
int ret = 0;
struct drm_display_mode *dup_mode;
int need_dup = 0;
struct drm_display_mode *dup_mode, *next;
LIST_HEAD(list);
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
break;
continue;
if (connector->encoder->crtc == crtc) {
if (need_dup)
dup_mode = drm_mode_duplicate(dev, mode);
else
dup_mode = mode;
ret = drm_mode_attachmode(dev, connector, dup_mode);
if (ret)
return ret;
need_dup = 1;
if (!dup_mode) {
ret = -ENOMEM;
goto out;
}
list_add_tail(&dup_mode->head, &list);
}
}
return 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
continue;
if (connector->encoder->crtc == crtc)
list_move_tail(list.next, &connector->user_modes);
}
 
WARN_ON(!list_empty(&list));
 
out:
list_for_each_entry_safe(dup_mode, next, &list, head)
drm_mode_destroy(dev, dup_mode);
 
return ret;
}
EXPORT_SYMBOL(drm_mode_attachmode_crtc);
 
2028,7 → 2589,7
struct drm_display_mode *mode;
struct drm_mode_object *obj;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
int ret;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
2048,9 → 2609,14
goto out;
}
 
drm_crtc_convert_umode(mode, umode);
ret = drm_crtc_convert_umode(mode, umode);
if (ret) {
DRM_DEBUG_KMS("Invalid mode\n");
drm_mode_destroy(dev, mode);
goto out;
}
 
ret = drm_mode_attachmode(dev, connector, mode);
drm_mode_attachmode(dev, connector, mode);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
2077,7 → 2643,7
struct drm_connector *connector;
struct drm_display_mode mode;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
int ret;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
2091,7 → 2657,12
}
connector = obj_to_connector(obj);
 
drm_crtc_convert_umode(&mode, umode);
ret = drm_crtc_convert_umode(&mode, umode);
if (ret) {
DRM_DEBUG_KMS("Invalid mode\n");
goto out;
}
 
ret = drm_mode_detachmode(dev, connector, &mode);
out:
mutex_unlock(&dev->mode_config.mutex);
2103,6 → 2674,7
const char *name, int num_values)
{
struct drm_property *property = NULL;
int ret;
 
property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
if (!property)
2114,30 → 2686,118
goto fail;
}
 
drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
if (ret)
goto fail;
 
property->flags = flags;
property->num_values = num_values;
INIT_LIST_HEAD(&property->enum_blob_list);
 
if (name)
if (name) {
strncpy(property->name, name, DRM_PROP_NAME_LEN);
property->name[DRM_PROP_NAME_LEN-1] = '\0';
}
 
list_add_tail(&property->head, &dev->mode_config.property_list);
return property;
fail:
kfree(property->values);
kfree(property);
return NULL;
}
EXPORT_SYMBOL(drm_property_create);
 
struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
const char *name,
const struct drm_prop_enum_list *props,
int num_values)
{
struct drm_property *property;
int i, ret;
 
flags |= DRM_MODE_PROP_ENUM;
 
property = drm_property_create(dev, flags, name, num_values);
if (!property)
return NULL;
 
for (i = 0; i < num_values; i++) {
ret = drm_property_add_enum(property, i,
props[i].type,
props[i].name);
if (ret) {
drm_property_destroy(dev, property);
return NULL;
}
}
 
return property;
}
EXPORT_SYMBOL(drm_property_create_enum);
 
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
int num_values)
{
struct drm_property *property;
int i, ret;
 
flags |= DRM_MODE_PROP_BITMASK;
 
property = drm_property_create(dev, flags, name, num_values);
if (!property)
return NULL;
 
for (i = 0; i < num_values; i++) {
ret = drm_property_add_enum(property, i,
props[i].type,
props[i].name);
if (ret) {
drm_property_destroy(dev, property);
return NULL;
}
}
 
return property;
}
EXPORT_SYMBOL(drm_property_create_bitmask);
 
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max)
{
struct drm_property *property;
 
flags |= DRM_MODE_PROP_RANGE;
 
property = drm_property_create(dev, flags, name, 2);
if (!property)
return NULL;
 
property->values[0] = min;
property->values[1] = max;
 
return property;
}
EXPORT_SYMBOL(drm_property_create_range);
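
The three constructors above are thin wrappers around drm_property_create() for the common enum, bitmask and range cases. A minimal driver-side sketch of how they combine with drm_connector_attach_property() follows; the example_attach_props() helper, the property names and the initial values are hypothetical and only illustrate the calling convention.

/* Illustrative sketch only: create two properties with the helpers above
 * and attach them to a connector with their initial values. */
static const struct drm_prop_enum_list example_scaling_names[] = {
	{ 0, "None" },
	{ 1, "Full" },
	{ 2, "Aspect" },
};

static void example_attach_props(struct drm_device *dev,
				 struct drm_connector *connector)
{
	struct drm_property *scaling, *brightness;

	/* Enum property: one named value per entry in the list. */
	scaling = drm_property_create_enum(dev, 0, "scaling mode",
					   example_scaling_names,
					   ARRAY_SIZE(example_scaling_names));

	/* Range property: values[0] and values[1] hold the min/max bounds. */
	brightness = drm_property_create_range(dev, 0, "brightness", 0, 255);

	if (!scaling || !brightness)
		return;

	drm_connector_attach_property(connector, scaling, 0);
	drm_connector_attach_property(connector, brightness, 128);
}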
 
int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name)
{
struct drm_property_enum *prop_enum;
 
if (!(property->flags & DRM_MODE_PROP_ENUM))
if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
return -EINVAL;
 
/*
* Bitmask enum properties have the additional constraint of values
* from 0 to 63
*/
if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
return -EINVAL;
 
if (!list_empty(&property->enum_blob_list)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
if (prop_enum->value == value) {
2179,22 → 2839,10
}
EXPORT_SYMBOL(drm_property_destroy);
 
int drm_connector_attach_property(struct drm_connector *connector,
void drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val)
{
int i;
 
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == 0) {
connector->property_ids[i] = property->base.id;
connector->property_values[i] = init_val;
break;
}
}
 
if (i == DRM_CONNECTOR_MAX_PROPERTY)
return -EINVAL;
return 0;
drm_object_attach_property(&connector->base, property, init_val);
}
EXPORT_SYMBOL(drm_connector_attach_property);
 
2201,38 → 2849,68
int drm_connector_property_set_value(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
return drm_object_property_set_value(&connector->base, property, value);
}
EXPORT_SYMBOL(drm_connector_property_set_value);
 
int drm_connector_property_get_value(struct drm_connector *connector,
struct drm_property *property, uint64_t *val)
{
return drm_object_property_get_value(&connector->base, property, val);
}
EXPORT_SYMBOL(drm_connector_property_get_value);
 
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
{
int count = obj->properties->count;
 
if (count == DRM_OBJECT_MAX_PROPERTY) {
WARN(1, "Failed to attach object property (type: 0x%x). Please "
"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
"you see this message on the same object type.\n",
obj->type);
return;
}
 
obj->properties->ids[count] = property->base.id;
obj->properties->values[count] = init_val;
obj->properties->count++;
}
EXPORT_SYMBOL(drm_object_attach_property);
 
int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t val)
{
int i;
 
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == property->base.id) {
connector->property_values[i] = value;
break;
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->ids[i] == property->base.id) {
obj->properties->values[i] = val;
return 0;
}
}
 
if (i == DRM_CONNECTOR_MAX_PROPERTY)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(drm_connector_property_set_value);
EXPORT_SYMBOL(drm_object_property_set_value);
 
int drm_connector_property_get_value(struct drm_connector *connector,
int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
int i;
 
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == property->base.id) {
*val = connector->property_values[i];
break;
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->ids[i] == property->base.id) {
*val = obj->properties->values[i];
return 0;
}
}
 
if (i == DRM_CONNECTOR_MAX_PROPERTY)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(drm_connector_property_get_value);
EXPORT_SYMBOL(drm_object_property_get_value);
 
#if 0
int drm_mode_getproperty_ioctl(struct drm_device *dev,
2249,7 → 2927,7
struct drm_property_enum *prop_enum;
struct drm_mode_property_enum __user *enum_ptr;
struct drm_property_blob *prop_blob;
uint32_t *blob_id_ptr;
uint32_t __user *blob_id_ptr;
uint64_t __user *values_ptr;
uint32_t __user *blob_length_ptr;
 
2264,7 → 2942,7
}
property = obj_to_property(obj);
 
if (property->flags & DRM_MODE_PROP_ENUM) {
if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head)
enum_count++;
} else if (property->flags & DRM_MODE_PROP_BLOB) {
2279,7 → 2957,7
out_resp->flags = property->flags;
 
if ((out_resp->count_values >= value_count) && value_count) {
values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
for (i = 0; i < value_count; i++) {
if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
ret = -EFAULT;
2289,10 → 2967,10
}
out_resp->count_values = value_count;
 
if (property->flags & DRM_MODE_PROP_ENUM) {
if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
 
if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
2314,8 → 2992,8
if (property->flags & DRM_MODE_PROP_BLOB) {
if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
copied = 0;
blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
 
list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
2343,6 → 3021,7
void *data)
{
struct drm_property_blob *blob;
int ret;
 
if (!length || !data)
return NULL;
2351,13 → 3030,16
if (!blob)
return NULL;
 
blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
if (ret) {
kfree(blob);
return NULL;
}
 
blob->length = length;
 
memcpy(blob->data, data, length);
 
drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
 
list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
return blob;
}
2378,7 → 3060,7
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
void *blob_ptr;
void __user *blob_ptr;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
2392,7 → 3074,7
blob = obj_to_blob(obj);
 
if (out_resp->length == blob->length) {
blob_ptr = (void *)(unsigned long)out_resp->data;
blob_ptr = (void __user *)(unsigned long)out_resp->data;
if (copy_to_user(blob_ptr, blob->data, blob->length)){
ret = -EFAULT;
goto done;
2410,7 → 3092,7
struct edid *edid)
{
struct drm_device *dev = connector->dev;
int ret = 0, size;
int ret, size;
 
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
2435,15 → 3117,69
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
 
#if 0
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
 
static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
struct drm_mode_connector_set_property *out_resp = data;
int ret = -EINVAL;
struct drm_connector *connector = obj_to_connector(obj);
 
/* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) {
if (connector->funcs->dpms)
(*connector->funcs->dpms)(connector, (int)value);
ret = 0;
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, value);
 
/* store the property value if successful */
if (!ret)
drm_connector_property_set_value(connector, property, value);
return ret;
}
 
static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
int ret = -EINVAL;
struct drm_crtc *crtc = obj_to_crtc(obj);
 
if (crtc->funcs->set_property)
ret = crtc->funcs->set_property(crtc, property, value);
if (!ret)
drm_object_property_set_value(obj, property, value);
 
return ret;
}
 
static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
int ret = -EINVAL;
struct drm_plane *plane = obj_to_plane(obj);
 
if (plane->funcs->set_property)
ret = plane->funcs->set_property(plane, property, value);
if (!ret)
drm_object_property_set_value(obj, property, value);
 
return ret;
}
 
int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_obj_get_properties *arg = data;
struct drm_mode_object *obj;
struct drm_property *property;
struct drm_connector *connector;
int ret = -EINVAL;
int ret = 0;
int i;
int copied = 0;
int props_count = 0;
uint32_t __user *props_ptr;
uint64_t __user *prop_values_ptr;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
2450,60 → 3186,95
 
mutex_lock(&dev->mode_config.mutex);
 
obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
ret = -EINVAL;
goto out;
}
connector = obj_to_connector(obj);
if (!obj->properties) {
ret = -EINVAL;
goto out;
}
 
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == out_resp->prop_id)
break;
props_count = obj->properties->count;
 
/* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it. */
if ((arg->count_props >= props_count) && props_count) {
copied = 0;
props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
prop_values_ptr = (uint64_t __user *)(unsigned long)
(arg->prop_values_ptr);
for (i = 0; i < props_count; i++) {
if (put_user(obj->properties->ids[i],
props_ptr + copied)) {
ret = -EFAULT;
goto out;
}
 
if (i == DRM_CONNECTOR_MAX_PROPERTY) {
if (put_user(obj->properties->values[i],
prop_values_ptr + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
arg->count_props = props_count;
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
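
The comment above notes that drm_mode_obj_get_properties_ioctl() follows the usual two-pass DRM convention: the first call only reports the count, the second fills caller-supplied buffers. A hedged userspace-style sketch, assuming libdrm's drmIoctl() and the DRM_IOCTL_MODE_OBJ_GETPROPERTIES request; the example_get_obj_properties() wrapper itself is hypothetical and error handling is kept minimal.

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>

static int example_get_obj_properties(int fd, uint32_t obj_id, uint32_t obj_type)
{
	struct drm_mode_obj_get_properties arg = {
		.obj_id = obj_id,
		.obj_type = obj_type,
	};
	uint32_t *ids;
	uint64_t *values;

	/* Pass 1: count_props is 0, so the kernel only reports the count. */
	if (drmIoctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg))
		return -1;

	ids = calloc(arg.count_props, sizeof(*ids));
	values = calloc(arg.count_props, sizeof(*values));
	if (!ids || !values)
		goto fail;

	arg.props_ptr = (uintptr_t)ids;
	arg.prop_values_ptr = (uintptr_t)values;

	/* Pass 2: buffers are large enough, so the kernel fills them in. */
	if (drmIoctl(fd, DRM_IOCTL_MODE_OBJ_GETPROPERTIES, &arg))
		goto fail;

	/* ... consume ids[i] / values[i] here ... */
	free(ids);
	free(values);
	return 0;

fail:
	free(ids);
	free(values);
	return -1;
}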
 
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_obj_set_property *arg = data;
struct drm_mode_object *arg_obj;
struct drm_mode_object *prop_obj;
struct drm_property *property;
int ret = -EINVAL;
int i;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!arg_obj)
goto out;
}
property = obj_to_property(obj);
if (!arg_obj->properties)
goto out;
 
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
for (i = 0; i < arg_obj->properties->count; i++)
if (arg_obj->properties->ids[i] == arg->prop_id)
break;
 
if (i == arg_obj->properties->count)
goto out;
 
if (property->flags & DRM_MODE_PROP_RANGE) {
if (out_resp->value < property->values[0])
prop_obj = drm_mode_object_find(dev, arg->prop_id,
DRM_MODE_OBJECT_PROPERTY);
if (!prop_obj)
goto out;
property = obj_to_property(prop_obj);
 
if (out_resp->value > property->values[1])
if (!drm_property_change_is_valid(property, arg->value))
goto out;
} else {
int found = 0;
for (i = 0; i < property->num_values; i++) {
if (property->values[i] == out_resp->value) {
found = 1;
 
switch (arg_obj->type) {
case DRM_MODE_OBJECT_CONNECTOR:
ret = drm_mode_connector_set_obj_prop(arg_obj, property,
arg->value);
break;
case DRM_MODE_OBJECT_CRTC:
ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
break;
case DRM_MODE_OBJECT_PLANE:
ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
break;
}
}
if (!found) {
goto out;
}
}
 
/* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) {
if (connector->funcs->dpms)
(*connector->funcs->dpms)(connector, (int) out_resp->value);
ret = 0;
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, out_resp->value);
 
/* store the property value if successful */
if (!ret)
drm_connector_property_set_value(connector, property, out_resp->value);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
2540,7 → 3311,7
}
EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
 
bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size)
{
crtc->gamma_size = gamma_size;
2548,10 → 3319,10
crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
if (!crtc->gamma_store) {
crtc->gamma_size = 0;
return false;
return -ENOMEM;
}
 
return true;
return 0;
}
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
 
2577,6 → 3348,11
}
crtc = obj_to_crtc(obj);
 
if (crtc->funcs->gamma_set == NULL) {
ret = -ENOSYS;
goto out;
}
 
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
ret = -EINVAL;
2682,3 → 3458,211
connector->funcs->reset(connector);
}
EXPORT_SYMBOL(drm_mode_config_reset);
/*
* Just need to support RGB formats here for compat with code that doesn't
* use pixel formats directly yet.
*/
void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp)
{
switch (format) {
case DRM_FORMAT_RGB332:
case DRM_FORMAT_BGR233:
*depth = 8;
*bpp = 8;
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_XBGR1555:
case DRM_FORMAT_RGBX5551:
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_RGBA5551:
case DRM_FORMAT_BGRA5551:
*depth = 15;
*bpp = 16;
break;
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
*depth = 16;
*bpp = 16;
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_BGR888:
*depth = 24;
*bpp = 24;
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_BGRX8888:
*depth = 24;
*bpp = 32;
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_RGBX1010102:
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_BGRA1010102:
*depth = 30;
*bpp = 32;
break;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
*depth = 32;
*bpp = 32;
break;
default:
DRM_DEBUG_KMS("unsupported pixel format\n");
*depth = 0;
*bpp = 0;
break;
}
}
EXPORT_SYMBOL(drm_fb_get_bpp_depth);
 
/**
* drm_format_num_planes - get the number of planes for format
* @format: pixel format (DRM_FORMAT_*)
*
* RETURNS:
* The number of planes used by the specified pixel format.
*/
int drm_format_num_planes(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
return 3;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
return 2;
default:
return 1;
}
}
EXPORT_SYMBOL(drm_format_num_planes);
 
/**
* drm_format_plane_cpp - determine the bytes per pixel value
* @format: pixel format (DRM_FORMAT_*)
* @plane: plane index
*
* RETURNS:
* The bytes per pixel value for the specified plane.
*/
int drm_format_plane_cpp(uint32_t format, int plane)
{
unsigned int depth;
int bpp;
 
if (plane >= drm_format_num_planes(format))
return 0;
 
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
return 2;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
return plane ? 2 : 1;
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
return 1;
default:
drm_fb_get_bpp_depth(format, &depth, &bpp);
return bpp >> 3;
}
}
EXPORT_SYMBOL(drm_format_plane_cpp);
 
/**
* drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
* @format: pixel format (DRM_FORMAT_*)
*
* RETURNS:
* The horizontal chroma subsampling factor for the
* specified pixel format.
*/
int drm_format_horz_chroma_subsampling(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
return 4;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
return 2;
default:
return 1;
}
}
EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
 
/**
* drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
* @format: pixel format (DRM_FORMAT_*)
*
* RETURNS:
* The vertical chroma subsampling factor for the
* specified pixel format.
*/
int drm_format_vert_chroma_subsampling(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
return 4;
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
return 2;
default:
return 1;
}
}
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
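
Taken together, the four format helpers above are enough to size the planes of a buffer: chroma planes are shrunk by the horizontal and vertical subsampling factors and scaled by the per-plane cpp. The example_plane_size() helper below is an illustrative sketch, not a DRM API; for DRM_FORMAT_NV12 at 1920x1080 it gives 1920*1080 bytes for the Y plane and 960*540*2 bytes for the interleaved CbCr plane.

/* Illustrative sketch: size in bytes of one plane of a width x height
 * buffer, derived from the format helpers defined above. */
static size_t example_plane_size(uint32_t format, int plane,
				 unsigned int width, unsigned int height)
{
	int cpp = drm_format_plane_cpp(format, plane);

	if (plane >= drm_format_num_planes(format) || cpp == 0)
		return 0;

	if (plane > 0) {
		/* Chroma planes carry fewer samples than the luma plane. */
		width /= drm_format_horz_chroma_subsampling(format);
		height /= drm_format_vert_chroma_subsampling(format);
	}

	return (size_t)width * height * cpp;
}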
/drivers/video/drm/drm_crtc_helper.c
29,22 → 29,27
* Jesse Barnes <jesse.barnes@intel.com>
*/
 
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include <linux/export.h>
#include <linux/moduleparam.h>
 
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
 
static bool drm_kms_helper_poll = true;
 
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
{
struct drm_display_mode *mode, *t;
struct drm_display_mode *mode;
 
if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
return;
 
list_for_each_entry_safe(mode, t, &connector->modes, head) {
list_for_each_entry(mode, &connector->modes, head) {
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
!(flags & DRM_MODE_FLAG_INTERLACE))
mode->status = MODE_NO_INTERLACE;
82,7 → 87,7
uint32_t maxX, uint32_t maxY)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *t;
struct drm_display_mode *mode;
struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
int count = 0;
91,7 → 96,7
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
drm_get_connector_name(connector));
/* set all modes to the unverified state */
list_for_each_entry_safe(mode, t, &connector->modes, head)
list_for_each_entry(mode, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
 
if (connector->force) {
113,7 → 118,12
goto prune;
}
 
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
count = drm_load_edid_firmware(connector);
if (count == 0)
#endif
count = (*connector_funcs->get_modes)(connector);
 
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
if (count == 0)
131,7 → 141,7
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
drm_mode_validate_flag(connector, mode_flags);
 
list_for_each_entry_safe(mode, t, &connector->modes, head) {
list_for_each_entry(mode, &connector->modes, head) {
if (mode->status == MODE_OK)
mode->status = connector_funcs->mode_valid(connector,
mode);
147,7 → 157,7
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
drm_get_connector_name(connector));
list_for_each_entry_safe(mode, t, &connector->modes, head) {
list_for_each_entry(mode, &connector->modes, head) {
mode->vrefresh = drm_mode_vrefresh(mode);
 
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
258,6 → 268,7
crtc->fb = NULL;
}
}
 
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 
342,12 → 353,20
struct drm_encoder *encoder;
bool ret = true;
 
ENTER();
 
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled)
return true;
 
printf("crtc->enabled\n");
 
adjusted_mode = drm_mode_duplicate(dev, mode);
if (!adjusted_mode)
return false;
 
printf("adjusted_mode\n");
 
saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
saved_x = crtc->x;
371,11 → 390,16
encoder_funcs = encoder->helper_private;
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
DRM_DEBUG_KMS("Encoder fixup failed\n");
goto done;
}
}
 
printf("list_for_each_entry\n");
printf("mode_fixup %x\n", crtc_funcs->mode_fixup);
 
if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
goto done;
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
445,11 → 469,37
crtc->y = saved_y;
}
 
LEAVE();
 
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
 
 
static int
drm_crtc_helper_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_encoder *encoder;
 
/* Decouple all encoders and their attached connectors from this crtc */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder != encoder)
continue;
 
connector->encoder = NULL;
}
}
 
drm_helper_disable_unused_functions(dev);
return 0;
}
 
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @crtc: CRTC to setup
478,7 → 528,8
struct drm_connector *save_connectors, *connector;
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
int ret = 0;
struct drm_mode_set save_set;
int ret;
int i;
 
DRM_DEBUG_KMS("\n");
503,8 → 554,7
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
set->mode = NULL;
set->num_connectors = 0;
return drm_crtc_helper_disable(set->crtc);
}
 
dev = set->crtc->dev;
550,6 → 600,12
save_connectors[count++] = *connector;
}
 
save_set.crtc = set->crtc;
save_set.mode = &set->crtc->mode;
save_set.x = set->crtc->x;
save_set.y = set->crtc->y;
save_set.fb = set->crtc->fb;
 
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
if (set->crtc->fb != set->fb) {
674,7 → 730,7
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
drm_get_connector_name(set->connectors[i]));
set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
drm_helper_disable_unused_functions(dev);
715,6 → 771,12
*connector = save_connectors[count++];
}
 
/* Try to restore the config */
if (mode_changed &&
!drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
save_set.y, save_set.fb))
DRM_ERROR("failed to restore config after modeset failure\n");
 
kfree(save_connectors);
kfree(save_encoders);
kfree(save_crtcs);
805,13 → 867,19
EXPORT_SYMBOL(drm_helper_connector_dpms);
 
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd)
struct drm_mode_fb_cmd2 *mode_cmd)
{
int i;
 
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
fb->pitch = mode_cmd->pitch;
fb->bits_per_pixel = mode_cmd->bpp;
fb->depth = mode_cmd->depth;
for (i = 0; i < 4; i++) {
fb->pitches[i] = mode_cmd->pitches[i];
fb->offsets[i] = mode_cmd->offsets[i];
}
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
&fb->bits_per_pixel);
fb->pixel_format = mode_cmd->pixel_format;
 
return 0;
}
913,7 → 981,7
}
 
if (repoll)
queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
}
 
void drm_kms_helper_poll_disable(struct drm_device *dev)
938,7 → 1006,7
}
 
if (poll)
queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
965,9 → 1033,8
/* kill timer and schedule immediate execution, this doesn't block */
cancel_delayed_work(&dev->mode_config.output_poll_work);
if (drm_kms_helper_poll)
queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
 
 
#endif
/drivers/video/drm/drm_dp_i2c_helper.c
27,8 → 27,8
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
#include "drm_dp_helper.h"
#include "drmP.h"
#include <drm/drm_dp_helper.h>
#include <drm/drmP.h>
 
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
/drivers/video/drm/drm_edid.c
30,8 → 30,9
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include "drmP.h"
#include "drm_edid.h"
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "drm_edid_modes.h"
 
#define version_greater(edid, maj, min) \
65,6 → 66,8
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
 
struct detailed_mode_closure {
struct drm_connector *connector;
80,10 → 83,13
#define LEVEL_CVT 3
 
static struct edid_quirk {
char *vendor;
char vendor[4];
int product_id;
u32 quirks;
} edid_quirk_list[] = {
/* ASUS VW222S */
{ "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
 
/* Acer AL1706 */
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
119,6 → 125,9
/* Samsung SyncMaster 22[5-6]BW */
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
 
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
};
 
/*** DDC fetch and block validation ***/
143,22 → 152,28
}
EXPORT_SYMBOL(drm_edid_header_is_valid);
 
static int edid_fixup __read_mostly = 6;
//module_param_named(edid_fixup, edid_fixup, int, 0400);
//MODULE_PARM_DESC(edid_fixup,
// "Minimum number of valid EDID header bytes (0-8, default 6)");
 
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
*/
static bool
drm_edid_block_valid(u8 *raw_edid)
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
int i;
u8 csum = 0;
struct edid *edid = (struct edid *)raw_edid;
 
if (raw_edid[0] == 0x00) {
if (edid_fixup > 8 || edid_fixup < 0)
edid_fixup = 6;
 
if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
else if (score >= 6) {
else if (score >= edid_fixup) {
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
memcpy(raw_edid, edid_header, sizeof(edid_header));
} else {
201,6 → 216,7
}
return 0;
}
EXPORT_SYMBOL(drm_edid_block_valid);
 
/**
* drm_edid_is_valid - sanity check EDID data
217,7 → 233,7
return false;
 
for (i = 0; i <= edid->extensions; i++)
if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
return false;
 
return true;
224,7 → 240,6
}
EXPORT_SYMBOL(drm_edid_is_valid);
 
#define DDC_ADDR 0x50
#define DDC_SEGMENT_ADDR 0x30
/**
* Get EDID information via I2C.
241,6 → 256,8
int block, int len)
{
unsigned char start = block * EDID_LENGTH;
unsigned char segment = block >> 1;
unsigned char xfers = segment ? 3 : 2;
int ret, retries = 5;
 
/* The core i2c driver will automatically retry the transfer if the
252,6 → 269,11
do {
struct i2c_msg msgs[] = {
{
.addr = DDC_SEGMENT_ADDR,
.flags = 0,
.len = 1,
.buf = &segment,
}, {
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
263,10 → 285,21
.buf = buf,
}
};
ret = i2c_transfer(adapter, msgs, 2);
} while (ret != 2 && --retries);
 
return ret == 2 ? 0 : -1;
/*
* Avoid sending the segment addr to not upset non-compliant ddc
* monitors.
*/
ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
 
if (ret == -ENXIO) {
DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
adapter->name);
break;
}
} while (ret != xfers && --retries);
 
return ret == xfers ? 0 : -1;
}
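
The segment handling above follows E-DDC addressing: EDID blocks are 128 bytes and two of them share one 256-byte segment, so block N is found in segment N >> 1 at offset (N & 1) * 128 (the unsigned char truncation of block * EDID_LENGTH yields the same offset). A tiny illustrative sketch of that mapping, using a hypothetical helper name:

/* Illustrative only: the block -> (segment, offset) mapping used above. */
static void example_edid_addressing(int block, unsigned char *segment,
				    unsigned char *offset)
{
	*segment = block >> 1;                /* E-DDC segment pointer   */
	*offset  = (block & 1) * EDID_LENGTH; /* 0 or 128 within segment */
}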
 
static bool drm_edid_is_zero(u8 *in_edid, int length)
283,9 → 316,10
static u8 *
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
size_t alloc_size;
int i, j = 0, valid_extensions = 0;
u8 *block, *new;
size_t alloc_size;
bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 
if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
294,7 → 328,7
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block))
if (drm_edid_block_valid(block, 0, print_bad_edid))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
326,7 → 360,7
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
valid_extensions++;
break;
}
351,8 → 385,11
return block;
 
carp:
if (print_bad_edid) {
dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
drm_get_connector_name(connector), j);
}
connector->bad_edid_counter++;
 
out:
kfree(block);
365,7 → 402,7
* \param adapter : i2c device adaptor
* \return 1 on success
*/
static bool
bool
drm_probe_ddc(struct i2c_adapter *adapter)
{
unsigned char out;
372,6 → 409,7
 
return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
}
EXPORT_SYMBOL(drm_probe_ddc);
 
/**
* drm_get_edid - get EDID data, if available
391,10 → 429,7
if (drm_probe_ddc(adapter))
edid = (struct edid *)drm_do_get_edid(connector, adapter);
 
connector->display_info.raw_edid = (char *)edid;
 
return edid;
 
}
EXPORT_SYMBOL(drm_get_edid);
 
490,23 → 525,47
preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
 
static bool
mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
(mode->hsync_end - mode->hsync_start == 32) &&
(mode->vsync_start - mode->vdisplay == 3);
}
 
/*
* drm_mode_find_dmt - Create a copy of a mode if present in DMT
* @dev: Device to duplicate against
* @hsize: Mode width
* @vsize: Mode height
* @fresh: Mode refresh rate
* @rb: Mode reduced-blanking-ness
*
* Walk the DMT mode list looking for a match for the given parameters.
* Return a newly allocated copy of the mode, or NULL if not found.
*/
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
int hsize, int vsize, int fresh,
bool rb)
{
struct drm_display_mode *mode = NULL;
int i;
 
for (i = 0; i < drm_num_dmt_modes; i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize == ptr->hdisplay &&
vsize == ptr->vdisplay &&
fresh == drm_mode_vrefresh(ptr)) {
/* get the expected default mode */
mode = drm_mode_duplicate(dev, ptr);
break;
if (hsize != ptr->hdisplay)
continue;
if (vsize != ptr->vdisplay)
continue;
if (fresh != drm_mode_vrefresh(ptr))
continue;
if (rb != mode_is_rb(ptr))
continue;
 
return drm_mode_duplicate(dev, ptr);
}
}
return mode;
 
return NULL;
}
EXPORT_SYMBOL(drm_mode_find_dmt);
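
With the rb parameter added above, a caller can now ask for a specific blanking variant from the expanded DMT table: for example, a call such as drm_mode_find_dmt(dev, 1920, 1200, 60, true) would duplicate the 1920x1200@60Hz RB entry, while passing false selects the conventional full-blanking timing.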
 
516,25 → 575,10
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
int i, n = 0;
u8 rev = ext[0x01], d = ext[0x02];
u8 d = ext[0x02];
u8 *det_base = ext + d;
 
switch (rev) {
case 0:
/* can't happen */
return;
case 1:
/* have to infer how many blocks we have, check pixel clock */
for (i = 0; i < 6; i++)
if (det_base[18*i] || det_base[18*i+1])
n++;
break;
default:
/* explicit count */
n = min(ext[0x03] & 0x0f, 6);
break;
}
 
n = (127 - d) / 18;
for (i = 0; i < n; i++)
cb((struct detailed_timing *)(det_base + 18 * i), closure);
}
593,7 → 637,7
drm_monitor_supports_rb(struct edid *edid)
{
if (edid->revision >= 4) {
bool ret;
bool ret = false;
drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
return ret;
}
750,10 → 794,17
}
 
/* check whether it can be found in default mode table */
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
if (drm_monitor_supports_rb(edid)) {
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
true);
if (mode)
return mode;
}
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
if (mode)
return mode;
 
/* okay, generate it */
switch (timing_level) {
case LEVEL_DMT:
break;
767,8 → 818,10
* secondary GTF curve. Please don't do that.
*/
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
if (!mode)
return NULL;
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
kfree(mode);
drm_mode_destroy(dev, mode);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
vrefresh_rate, 0, 0,
drm_gtf2_m(edid),
871,12 → 924,19
"Wrong Hsync/Vsync pulse width\n");
return NULL;
}
 
if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
if (!mode)
return NULL;
 
goto set_size;
}
 
mode = drm_mode_create(dev);
if (!mode)
return NULL;
 
mode->type = DRM_MODE_TYPE_DRIVER;
 
if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
timing->pixel_clock = cpu_to_le16(1088);
 
900,8 → 960,6
 
drm_mode_do_interlace_quirk(mode, pt);
 
drm_mode_set_name(mode);
 
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
911,6 → 969,7
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
 
set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
 
924,19 → 983,13
mode->height_mm = edid->height_cm * 10;
}
 
mode->type = DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
 
return mode;
}
 
static bool
mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
(mode->hsync_end - mode->hsync_start == 32) &&
(mode->vsync_start - mode->vdisplay == 3);
}
 
static bool
mode_in_hsync_range(const struct drm_display_mode *mode,
struct edid *edid, u8 *t)
{
1013,12 → 1066,26
return true;
}
 
/*
* XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
* need to account for them.
*/
static bool valid_inferred_mode(const struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct drm_display_mode *m;
bool ok = false;
 
list_for_each_entry(m, &connector->probed_modes, head) {
if (mode->hdisplay == m->hdisplay &&
mode->vdisplay == m->vdisplay &&
drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
return false; /* duplicated */
if (mode->hdisplay <= m->hdisplay &&
mode->vdisplay <= m->vdisplay)
ok = true;
}
return ok;
}
 
static int
drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
{
int i, modes = 0;
1026,7 → 1093,8
struct drm_device *dev = connector->dev;
 
for (i = 0; i < drm_num_dmt_modes; i++) {
if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
1038,17 → 1106,112
return modes;
}
 
/* fix up 1366x768 mode from 1368x768;
* GTF/CVT can't express a 1366-pixel width, which isn't divisible by 8
*/
static void fixup_mode_1366x768(struct drm_display_mode *mode)
{
if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
mode->hdisplay = 1366;
mode->hsync_start--;
mode->hsync_end--;
drm_mode_set_name(mode);
}
}
 
static int
drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
 
for (i = 0; i < num_extra_modes; i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
if (!newmode)
return modes;
 
fixup_mode_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
 
drm_mode_probed_add(connector, newmode);
modes++;
}
 
return modes;
}
 
static int
drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
bool rb = drm_monitor_supports_rb(edid);
 
for (i = 0; i < num_extra_modes; i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
if (!newmode)
return modes;
 
fixup_mode_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
 
drm_mode_probed_add(connector, newmode);
modes++;
}
 
return modes;
}
 
static void
do_inferred_modes(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
struct detailed_non_pixel *data = &timing->data.other_data;
int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
struct detailed_data_monitor_range *range = &data->data.range;
 
if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
if (data->type != EDID_DETAIL_MONITOR_RANGE)
return;
 
closure->modes += drm_dmt_modes_for_range(closure->connector,
closure->edid,
timing);
 
if (!version_greater(closure->edid, 1, 1))
return; /* GTF not defined yet */
 
switch (range->flags) {
case 0x02: /* secondary gtf, XXX could do more */
case 0x00: /* default gtf */
closure->modes += drm_gtf_modes_for_range(closure->connector,
closure->edid,
timing);
break;
case 0x04: /* cvt, only in 1.4+ */
if (!version_greater(closure->edid, 1, 3))
break;
 
closure->modes += drm_cvt_modes_for_range(closure->connector,
closure->edid,
timing);
break;
case 0x01: /* just the ranges, no formula */
default:
break;
}
}
 
static int
1081,8 → 1244,8
mode = drm_mode_find_dmt(connector->dev,
est3_modes[m].w,
est3_modes[m].h,
est3_modes[m].r
/*, est3_modes[m].rb */);
est3_modes[m].r,
est3_modes[m].rb);
if (mode) {
drm_mode_probed_add(connector, mode);
modes++;
1327,8 → 1490,12
 
#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define EDID_BASIC_AUDIO (1 << 6)
#define EDID_CEA_YCRCB444 (1 << 5)
#define EDID_CEA_YCRCB422 (1 << 4)
 
/**
* Search EDID for CEA extension block.
1356,7 → 1523,297
}
EXPORT_SYMBOL(drm_find_cea_extension);
 
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
{
struct drm_device *dev = connector->dev;
u8 * mode, cea_mode;
int modes = 0;
 
for (mode = db; mode < db + len; mode++) {
cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
if (cea_mode < drm_num_cea_modes) {
struct drm_display_mode *newmode;
newmode = drm_mode_duplicate(dev,
&edid_cea_modes[cea_mode]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
}
 
return modes;
}
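
As a worked example of the numbering noted in do_cea_modes(): a Video Data Block byte of 0x90 has the native bit (bit 7) set and carries VIC 16, so (*mode & 127) - 1 yields index 15, the 1920x1080@60Hz entry in edid_cea_modes, since that table is laid out with index equal to VIC - 1.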
 
static int
cea_db_payload_len(const u8 *db)
{
return db[0] & 0x1f;
}
 
static int
cea_db_tag(const u8 *db)
{
return db[0] >> 5;
}
 
static int
cea_revision(const u8 *cea)
{
return cea[1];
}
 
static int
cea_db_offsets(const u8 *cea, int *start, int *end)
{
/* Data block offset in CEA extension block */
*start = 4;
*end = cea[2];
if (*end == 0)
*end = 127;
if (*end < 4 || *end > 127)
return -ERANGE;
return 0;
}
 
#define for_each_cea_db(cea, i, start, end) \
for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
 
static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
u8 * cea = drm_find_cea_extension(edid);
u8 * db, dbl;
int modes = 0;
 
if (cea && cea_revision(cea) >= 3) {
int i, start, end;
 
if (cea_db_offsets(cea, &start, &end))
return 0;
 
for_each_cea_db(cea, i, start, end) {
db = &cea[i];
dbl = cea_db_payload_len(db);
 
if (cea_db_tag(db) == VIDEO_BLOCK)
modes += do_cea_modes (connector, db+1, dbl);
}
}
 
return modes;
}
 
static void
parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
{
u8 len = cea_db_payload_len(db);
 
if (len >= 6) {
connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
connector->dvi_dual = db[6] & 1;
}
if (len >= 7)
connector->max_tmds_clock = db[7] * 5;
if (len >= 8) {
connector->latency_present[0] = db[8] >> 7;
connector->latency_present[1] = (db[8] >> 6) & 1;
}
if (len >= 9)
connector->video_latency[0] = db[9];
if (len >= 10)
connector->audio_latency[0] = db[10];
if (len >= 11)
connector->video_latency[1] = db[11];
if (len >= 12)
connector->audio_latency[1] = db[12];
 
DRM_LOG_KMS("HDMI: DVI dual %d, "
"max TMDS clock %d, "
"latency present %d %d, "
"video latency %d %d, "
"audio latency %d %d\n",
connector->dvi_dual,
connector->max_tmds_clock,
(int) connector->latency_present[0],
(int) connector->latency_present[1],
connector->video_latency[0],
connector->video_latency[1],
connector->audio_latency[0],
connector->audio_latency[1]);
}
 
static void
monitor_name(struct detailed_timing *t, void *data)
{
if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
*(u8 **)data = t->data.other_data.data.str.str;
}
 
static bool cea_db_is_hdmi_vsdb(const u8 *db)
{
int hdmi_id;
 
if (cea_db_tag(db) != VENDOR_BLOCK)
return false;
 
if (cea_db_payload_len(db) < 5)
return false;
 
hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
 
return hdmi_id == HDMI_IDENTIFIER;
}
 
/**
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
* @edid: EDID to parse
*
* Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
* Some ELD fields are left to the graphics driver caller:
* - Conn_Type
* - HDCP
* - Port_ID
*/
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
{
uint8_t *eld = connector->eld;
u8 *cea;
u8 *name;
u8 *db;
int sad_count = 0;
int mnl;
int dbl;
 
memset(eld, 0, sizeof(connector->eld));
 
cea = drm_find_cea_extension(edid);
if (!cea) {
DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
return;
}
 
name = NULL;
drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
for (mnl = 0; name && mnl < 13; mnl++) {
if (name[mnl] == 0x0a)
break;
eld[20 + mnl] = name[mnl];
}
eld[4] = (cea[1] << 5) | mnl;
DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
 
eld[0] = 2 << 3; /* ELD version: 2 */
 
eld[16] = edid->mfg_id[0];
eld[17] = edid->mfg_id[1];
eld[18] = edid->prod_code[0];
eld[19] = edid->prod_code[1];
 
if (cea_revision(cea) >= 3) {
int i, start, end;
 
if (cea_db_offsets(cea, &start, &end)) {
start = 0;
end = 0;
}
 
for_each_cea_db(cea, i, start, end) {
db = &cea[i];
dbl = cea_db_payload_len(db);
switch (cea_db_tag(db)) {
case AUDIO_BLOCK:
/* Audio Data Block, contains SADs */
sad_count = dbl / 3;
if (dbl >= 1)
memcpy(eld + 20 + mnl, &db[1], dbl);
break;
case SPEAKER_BLOCK:
/* Speaker Allocation Data Block */
if (dbl >= 1)
eld[7] = db[1];
break;
case VENDOR_BLOCK:
/* HDMI Vendor-Specific Data Block */
if (cea_db_is_hdmi_vsdb(db))
parse_hdmi_vsdb(connector, db);
break;
default:
break;
}
}
}
eld[5] |= sad_count << 4;
eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
 
DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
}
EXPORT_SYMBOL(drm_edid_to_eld);
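
As a quick check of the size arithmetic above: with a 9-character monitor name (mnl = 9) and three SADs (sad_count = 3), eld[2] becomes (20 + 9 + 3*3 + 3) / 4 = 10, i.e. the baseline ELD block occupies ten 4-byte words.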
 
/**
* drm_av_sync_delay - HDMI/DP sink audio-video sync delay in milliseconds
* @connector: connector associated with the HDMI/DP sink
* @mode: the display mode
*/
int drm_av_sync_delay(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
int a, v;
 
if (!connector->latency_present[0])
return 0;
if (!connector->latency_present[1])
i = 0;
 
a = connector->audio_latency[i];
v = connector->video_latency[i];
 
/*
* HDMI/DP sink doesn't support audio or video?
*/
if (a == 255 || v == 255)
return 0;
 
/*
* Convert raw EDID values to milliseconds.
* Treat unknown latency as 0ms.
*/
if (a)
a = min(2 * (a - 1), 500);
if (v)
v = min(2 * (v - 1), 500);
 
return max(v - a, 0);
}
EXPORT_SYMBOL(drm_av_sync_delay);
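
For instance, a raw EDID audio latency byte of 11 decodes to (11 - 1) * 2 = 20 ms and a raw video latency of 36 to (36 - 1) * 2 = 70 ms, so the function reports an extra audio delay of max(70 - 20, 0) = 50 ms for that mode; decoded values are capped at 500 ms, and a raw value of 255 means the sink does not support that stream at all.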
 
/**
* drm_select_eld - select one ELD from multiple HDMI/DP sinks
* @encoder: the encoder just changed display mode
* @mode: the adjusted display mode
*
* It's possible for one encoder to be associated with multiple HDMI/DP sinks.
* The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
*/
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder && connector->eld[0])
return connector;
 
return NULL;
}
EXPORT_SYMBOL(drm_select_eld);
 
/**
* drm_detect_hdmi_monitor - detect whether monitor is hdmi.
* @edid: monitor EDID information
*
1366,38 → 1823,26
bool drm_detect_hdmi_monitor(struct edid *edid)
{
u8 *edid_ext;
int i, hdmi_id;
int i;
int start_offset, end_offset;
bool is_hdmi = false;
 
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
goto end;
return false;
 
/* Data block offset in CEA extension block */
start_offset = 4;
end_offset = edid_ext[2];
if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
return false;
 
/*
* Because HDMI identifier is in Vendor Specific Block,
* search it from all data blocks of CEA extension.
*/
for (i = start_offset; i < end_offset;
/* Increased by data block len */
i += ((edid_ext[i] & 0x1f) + 1)) {
/* Find vendor specific block */
if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
edid_ext[i + 3] << 16;
/* Find HDMI identifier */
if (hdmi_id == HDMI_IDENTIFIER)
is_hdmi = true;
break;
}
for_each_cea_db(edid_ext, i, start_offset, end_offset) {
if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
return true;
}
 
end:
return is_hdmi;
return false;
}
EXPORT_SYMBOL(drm_detect_hdmi_monitor);
 
1429,15 → 1874,13
goto end;
}
 
/* Data block offset in CEA extension block */
start_offset = 4;
end_offset = edid_ext[2];
if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
goto end;
 
for (i = start_offset; i < end_offset;
i += ((edid_ext[i] & 0x1f) + 1)) {
if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
for_each_cea_db(edid_ext, i, start_offset, end_offset) {
if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
has_audio = true;
for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
DRM_DEBUG_KMS("CEA audio format %d\n",
(edid_ext[i + j] >> 3) & 0xf);
goto end;
1469,13 → 1912,29
info->bpc = 0;
info->color_formats = 0;
 
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
if (edid->revision < 3)
return;
 
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
return;
 
/* Get data from CEA blocks if present */
edid_ext = drm_find_cea_extension(edid);
if (edid_ext) {
info->cea_rev = edid_ext[1];
 
/* The existence of a CEA block should imply RGB support */
info->color_formats = DRM_COLOR_FORMAT_RGB444;
if (edid_ext[3] & EDID_CEA_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid_ext[3] & EDID_CEA_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
 
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
return;
 
switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
case DRM_EDID_DIGITAL_DEPTH_6:
info->bpc = 6;
1501,18 → 1960,11
break;
}
 
info->color_formats = DRM_COLOR_FORMAT_RGB444;
if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
 
/* Get data from CEA blocks if present */
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
return;
 
info->cea_rev = edid_ext[1];
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
 
/**
1559,6 → 2011,7
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
num_modes += add_inferred_modes(connector, edid);
num_modes += add_cea_modes(connector, edid);
 
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
/drivers/video/drm/drm_edid_modes.h
24,13 → 24,12
*/
 
#include <linux/kernel.h>
#include "drmP.h"
#include "drm_edid.h"
#include <drm/drmP.h>
#include <drm/drm_edid.h>
 
/*
* Autogenerated from the DMT spec.
* This table is copied from xfree86/modes/xf86EdidModes.c.
* But the mode with Reduced blank feature is deleted.
*/
static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
81,12 → 80,16
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
896, 1048, 0, 600, 601, 604, 631, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@120Hz RB */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
880, 960, 0, 600, 603, 607, 636, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 848x480@60Hz */
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
976, 1088, 0, 480, 486, 494, 517, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
1208, 1264, 0, 768, 768, 772, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
106,10 → 109,18
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@120Hz RB */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
1104, 1184, 0, 768, 771, 775, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@60Hz RB */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
1360, 1440, 0, 768, 771, 778, 790, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
122,6 → 133,14
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
1496, 1712, 0, 768, 771, 778, 809, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@120Hz RB */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
1360, 1440, 0, 768, 771, 778, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz RB */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
1360, 1440, 0, 800, 803, 809, 823, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
134,6 → 153,10
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
1496, 1712, 0, 800, 803, 809, 843, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@120Hz RB */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
1360, 1440, 0, 800, 803, 809, 847, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
142,6 → 165,10
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
1504, 1728, 0, 960, 961, 964, 1011, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x960@120Hz RB */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
1360, 1440, 0, 960, 963, 967, 1017, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
154,22 → 181,42
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@120Hz RB */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x1050@60Hz */
/* 1360x768@120Hz RB */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
1440, 1520, 0, 768, 771, 776, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1400x1050@60Hz RB */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x1050@75Hz */
/* 1400x1050@75Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x1050@85Hz */
/* 1400x1050@85Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@120Hz RB */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz RB */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
1520, 1600, 0, 900, 903, 909, 926, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
182,6 → 229,10
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
1696, 1952, 0, 900, 903, 909, 948, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@120Hz RB */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
1520, 1600, 0, 900, 903, 909, 953, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
202,6 → 253,14
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@120Hz RB */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz RB */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
214,15 → 273,23
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@120Hz RB */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1729x1344@75Hz */
/* 1792x1344@75Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1853x1392@60Hz */
/* 1792x1344@120Hz RB */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
230,6 → 297,14
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@120Hz RB */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz RB */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
242,6 → 317,10
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@120Hz RB */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
250,6 → 329,14
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@120Hz RB */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz RB */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
262,6 → 349,11
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@120Hz RB */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 
};
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
303,7 → 395,7
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
320,12 → 412,14
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
 
static const struct {
struct minimode {
short w;
short h;
short r;
short rb;
} est3_modes[] = {
};
 
static const struct minimode est3_modes[] = {
/* byte 6 */
{ 640, 350, 85, 0 },
{ 640, 400, 85, 0 },
377,4 → 471,304
{ 1920, 1440, 60, 0 },
{ 1920, 1440, 75, 0 },
};
static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
static const int num_est3_modes = ARRAY_SIZE(est3_modes);
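ARRAY_SIZE(x) is the usual sizeof(x)/sizeof((x)[0]) element-count idiom; a minimal sketch under that assumption, with ARRAY_SIZE_SKETCH and demo_modes as illustrative names (the kernel macro additionally rejects plain pointers at compile time, which is omitted here):

/* Illustrative only: element count of a true array, evaluated at compile time. */
#define ARRAY_SIZE_SKETCH(arr) (sizeof(arr) / sizeof((arr)[0]))

static const struct minimode demo_modes[] = {
	{ 1024,  576, 60, 0 },
	{ 1366,  768, 60, 0 },
};
static const int num_demo_modes = ARRAY_SIZE_SKETCH(demo_modes); /* == 2 */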
 
static const struct minimode extra_modes[] = {
{ 1024, 576, 60, 0 },
{ 1366, 768, 60, 0 },
{ 1600, 900, 60, 0 },
{ 1680, 945, 60, 0 },
{ 1920, 1080, 60, 0 },
{ 2048, 1152, 60, 0 },
{ 2048, 1536, 60, 0 },
};
static const int num_extra_modes = ARRAY_SIZE(extra_modes);
 
/*
* Probably taken from CEA-861 spec.
* This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
*/
static const struct drm_display_mode edid_cea_modes[] = {
/* 1 - 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 3 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 4 - 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 5 - 1920x1080i@60Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 8 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 9 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 10 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 11 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 12 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 13 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 14 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 15 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 16 - 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 17 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 18 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 19 - 1280x720@50Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 20 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 23 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 24 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 25 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 26 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 27 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 28 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 29 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 30 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 31 - 1920x1080@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 32 - 1920x1080@24Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 33 - 1920x1080@25Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 34 - 1920x1080@30Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 35 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 36 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 37 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 38 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 39 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 40 - 1920x1080i@100Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 41 - 1280x720@100Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 42 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 43 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 47 - 1280x720@120Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 48 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 49 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 52 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 53 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 56 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 57 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 58 - 1440x480i@240Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 59 - 1440x480i@240Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 60 - 1280x720@24Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 61 - 1280x720@25Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 62 - 1280x720@30Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 63 - 1920x1080@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 64 - 1920x1080@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes);
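The table above is indexed by CEA VIC (the number in each entry's comment, starting at 1); a minimal sketch of a lookup under that assumption, with cea_mode_for_vic() as an illustrative helper rather than a function from this file:

/* Illustrative only: map a CEA Video Identification Code to its table entry.
 * VICs start at 1, so entry N lives at index N - 1; out-of-range VICs get NULL. */
static const struct drm_display_mode *cea_mode_for_vic(unsigned vic)
{
	if (vic == 0 || vic > drm_num_cea_modes)
		return NULL;
	return &edid_cea_modes[vic - 1];
}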
/drivers/video/drm/drm_fb_helper.c
31,10 → 31,11
#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_fb_helper.h"
#include "drm_crtc_helper.h"
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
 
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
87,6 → 88,9
{
uint16_t *r_base, *g_base, *b_base;
 
if (crtc->funcs->gamma_set == NULL)
return;
 
r_base = crtc->gamma_store;
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
95,91 → 99,32
}
 
 
static void drm_fb_helper_on(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_connector *connector;
struct drm_encoder *encoder;
int i, j;
 
/*
* For each CRTC in this fb, turn the crtc on then,
* find all associated encoders and turn them on.
*/
mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
crtc_funcs = crtc->helper_private;
 
if (!crtc->enabled)
continue;
 
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
 
/* Walk the connectors & encoders on this fb turning them on */
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->dpms = DRM_MODE_DPMS_ON;
drm_connector_property_set_value(connector,
dev->mode_config.dpms_property,
DRM_MODE_DPMS_ON);
}
/* Found a CRTC on this fb, now find encoders */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
struct drm_encoder_helper_funcs *encoder_funcs;
 
encoder_funcs = encoder->helper_private;
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
}
}
mutex_unlock(&dev->mode_config.mutex);
}
 
static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_connector *connector;
struct drm_encoder *encoder;
int i, j;
 
/*
* For each CRTC in this fb, find all associated encoders
* and turn them off, then turn off the CRTC.
* For each CRTC in this fb, turn the connectors on/off.
*/
mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
crtc_funcs = crtc->helper_private;
 
if (!crtc->enabled)
continue;
 
/* Walk the connectors on this fb and mark them off */
/* Walk the connectors & encoders on this fb turning them on/off */
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->dpms = dpms_mode;
connector->funcs->dpms(connector, dpms_mode);
drm_connector_property_set_value(connector,
dev->mode_config.dpms_property,
dpms_mode);
dev->mode_config.dpms_property, dpms_mode);
}
/* Found a CRTC on this fb, now find encoders */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
struct drm_encoder_helper_funcs *encoder_funcs;
 
encoder_funcs = encoder->helper_private;
encoder_funcs->dpms(encoder, dpms_mode);
}
}
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
mutex_unlock(&dev->mode_config.mutex);
}
189,23 → 134,23
switch (blank) {
/* Display: On; HSync: On, VSync: On */
case FB_BLANK_UNBLANK:
drm_fb_helper_on(info);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON);
break;
/* Display: Off; HSync: On, VSync: On */
case FB_BLANK_NORMAL:
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: Off, VSync: On */
case FB_BLANK_HSYNC_SUSPEND:
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: On, VSync: Off */
case FB_BLANK_VSYNC_SUSPEND:
drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND);
break;
/* Display: Off; HSync: Off, VSync: Off */
case FB_BLANK_POWERDOWN:
drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF);
break;
}
return 0;
219,8 → 164,11
for (i = 0; i < helper->connector_count; i++)
kfree(helper->connector_info[i]);
kfree(helper->connector_info);
for (i = 0; i < helper->crtc_count; i++)
for (i = 0; i < helper->crtc_count; i++) {
kfree(helper->crtc_info[i].mode_set.connectors);
if (helper->crtc_info[i].mode_set.mode)
drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
}
kfree(helper->crtc_info);
}
 
229,7 → 177,6
int crtc_count, int max_conn_count)
{
struct drm_crtc *crtc;
int ret = 0;
int i;
 
fb_helper->dev = dev;
254,20 → 201,17
sizeof(struct drm_connector *),
GFP_KERNEL);
 
if (!fb_helper->crtc_info[i].mode_set.connectors) {
ret = -ENOMEM;
if (!fb_helper->crtc_info[i].mode_set.connectors)
goto out_free;
}
fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
 
i = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
fb_helper->crtc_info[i].crtc_id = crtc->base.id;
fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
fb_helper->conn_limit = max_conn_count;
 
return 0;
out_free:
drm_fb_helper_crtc_free(fb_helper);
389,9 → 333,13
return -EINVAL;
 
/* Need to resize the fb object !!! */
if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
if (var->bits_per_pixel > fb->bits_per_pixel ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
"object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
fb->width, fb->height, fb->bits_per_pixel);
return -EINVAL;
}
548,8 → 496,41
sizes.fb_width = (unsigned)-1;
sizes.fb_height = (unsigned)-1;
 
/* if driver picks 8 or 16 by default use that
for both depth/bpp */
if (preferred_bpp != sizes.surface_bpp) {
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
}
/* first up get a count of crtcs now in use and new min/maxes width/heights */
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_cmdline_mode *cmdline_mode;
 
cmdline_mode = &fb_helper_conn->cmdline_mode;
 
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
case 8:
sizes.surface_depth = sizes.surface_bpp = 8;
break;
case 15:
sizes.surface_depth = 15;
sizes.surface_bpp = 16;
break;
case 16:
sizes.surface_depth = sizes.surface_bpp = 16;
break;
case 24:
sizes.surface_depth = sizes.surface_bpp = 24;
break;
case 32:
sizes.surface_depth = 24;
sizes.surface_bpp = 32;
break;
}
break;
}
}
 
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
904,7 → 885,6
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_encoder *encoder;
struct drm_mode_set *modeset;
bool *enabled;
int width, height;
915,11 → 895,6
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
 
/* clean out all the encoder/crtc combos */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder->crtc = NULL;
}
 
crtcs = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector,
992,7 → 967,7
int count = 0;
 
/* disable all the possible outputs/crtcs before entering KMS mode */
// drm_helper_disable_unused_functions(fb_helper->dev);
drm_helper_disable_unused_functions(fb_helper->dev);
 
// drm_fb_helper_parse_command_line(fb_helper);
 
/drivers/video/drm/drm_irq.c
33,7 → 33,7
* OTHER DEALINGS IN THE SOFTWARE.
*/
 
#include "drmP.h"
#include <drm/drmP.h>
#include <asm/div64.h>
//#include "drm_trace.h"
 
41,6 → 41,7
#include <linux/slab.h>
 
//#include <linux/vgaarb.h>
#include <linux/export.h>
 
/* Access macro for slots in vblank timestamp ringbuffer. */
#define vblanktimestamp(dev, crtc, count) ( \
134,3 → 135,52
(int) linedur_ns, (int) pixeldur_ns);
}
 
 
/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
* @crtc: CRTC in question
*
* Account for vblank events across mode setting events, which will likely
* reset the hardware frame counter.
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
#if 0
/* vblank is not initialized (IRQ not installed ?) */
if (!dev->num_crtcs)
return;
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
* have the kernel take a reference on the CRTC (just once though
* to avoid corrupting the count if multiple, mismatched calls occur),
* so that interrupts remain enabled in the interim.
*/
if (!dev->vblank_inmodeset[crtc]) {
dev->vblank_inmodeset[crtc] = 0x1;
if (drm_vblank_get(dev, crtc) == 0)
dev->vblank_inmodeset[crtc] |= 0x2;
}
#endif
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);
 
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
#if 0
unsigned long irqflags;
 
if (dev->vblank_inmodeset[crtc]) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
if (dev->vblank_inmodeset[crtc] & 0x2)
drm_vblank_put(dev, crtc);
 
dev->vblank_inmodeset[crtc] = 0;
}
#endif
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
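Per the comment above, the two helpers are meant to bracket a mode set so the vblank reference survives the hardware frame-counter reset; a hedged sketch of the intended call pattern, with example_crtc_mode_set() and set_mode_hw() as placeholders for driver code:

/* Illustrative call pattern only; set_mode_hw() stands in for the driver's
 * own CRTC programming. */
static void example_crtc_mode_set(struct drm_device *dev, int crtc)
{
	drm_vblank_pre_modeset(dev, crtc);   /* keep the vblank refcount across the reset */
	set_mode_hw(dev, crtc);              /* driver-specific: reprograms the CRTC      */
	drm_vblank_post_modeset(dev, crtc);  /* allow vblank IRQs to be disabled again    */
}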
/drivers/video/drm/drm_mm.c
41,10 → 41,11
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
 
#include "drmP.h"
#include "drm_mm.h"
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
 
#define MM_UNUSED_TARGET 4
 
117,39 → 118,46
 
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment)
unsigned long size, unsigned alignment,
unsigned long color)
{
struct drm_mm *mm = hole_node->mm;
unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
unsigned long adj_start = hole_start;
unsigned long adj_end = hole_end;
 
BUG_ON(!hole_node->hole_follows || node->allocated);
 
if (alignment)
tmp = hole_start % alignment;
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
if (!tmp) {
if (alignment) {
unsigned tmp = adj_start % alignment;
if (tmp)
adj_start += alignment - tmp;
}
 
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
list_del_init(&hole_node->hole_stack);
} else
wasted = alignment - tmp;
list_del(&hole_node->hole_stack);
}
 
node->start = hole_start + wasted;
node->start = adj_start;
node->size = size;
node->mm = mm;
node->color = color;
node->allocated = 1;
 
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
 
BUG_ON(node->start + node->size > hole_end);
BUG_ON(node->start + node->size > adj_end);
 
node->hole_follows = 0;
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
} else {
node->hole_follows = 0;
}
}
 
156,6 → 164,7
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
unsigned long color,
int atomic)
{
struct drm_mm_node *node;
164,7 → 173,7
if (unlikely(node == NULL))
return NULL;
 
drm_mm_insert_helper(hole_node, node, size, alignment);
drm_mm_insert_helper(hole_node, node, size, alignment, color);
 
return node;
}
180,11 → 189,11
{
struct drm_mm_node *hole_node;
 
hole_node = drm_mm_search_free(mm, size, alignment, 0);
hole_node = drm_mm_search_free(mm, size, alignment, false);
if (!hole_node)
return -ENOSPC;
 
drm_mm_insert_helper(hole_node, node, size, alignment);
drm_mm_insert_helper(hole_node, node, size, alignment, 0);
 
return 0;
}
193,44 → 202,50
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long color,
unsigned long start, unsigned long end)
{
struct drm_mm *mm = hole_node->mm;
unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
unsigned long adj_start = hole_start;
unsigned long adj_end = hole_end;
 
BUG_ON(!hole_node->hole_follows || node->allocated);
 
if (hole_start < start)
wasted += start - hole_start;
if (alignment)
tmp = (hole_start + wasted) % alignment;
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
if (adj_start < start)
adj_start = start;
 
if (alignment) {
unsigned tmp = adj_start % alignment;
if (tmp)
wasted += alignment - tmp;
adj_start += alignment - tmp;
}
 
if (!wasted) {
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
list_del_init(&hole_node->hole_stack);
list_del(&hole_node->hole_stack);
}
 
node->start = hole_start + wasted;
node->start = adj_start;
node->size = size;
node->mm = mm;
node->color = color;
node->allocated = 1;
 
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
 
BUG_ON(node->start + node->size > hole_end);
BUG_ON(node->start + node->size > adj_end);
BUG_ON(node->start + node->size > end);
 
node->hole_follows = 0;
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
} else {
node->hole_follows = 0;
}
}
 
237,6 → 252,7
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
int atomic)
247,7 → 263,7
if (unlikely(node == NULL))
return NULL;
 
drm_mm_insert_helper_range(hole_node, node, size, alignment,
drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
start, end);
 
return node;
266,11 → 282,11
struct drm_mm_node *hole_node;
 
hole_node = drm_mm_search_free_in_range(mm, size, alignment,
start, end, 0);
start, end, false);
if (!hole_node)
return -ENOSPC;
 
drm_mm_insert_helper_range(hole_node, node, size, alignment,
drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
start, end);
 
return 0;
335,8 → 351,6
static int check_free_hole(unsigned long start, unsigned long end,
unsigned long size, unsigned alignment)
{
unsigned wasted = 0;
 
if (end - start < size)
return 0;
 
343,19 → 357,17
if (alignment) {
unsigned tmp = start % alignment;
if (tmp)
wasted = alignment - tmp;
start += alignment - tmp;
}
 
if (end >= start + size + wasted) {
return 1;
}
 
return 0;
return end >= start + size;
}
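check_free_hole() rounds the hole start up to the next multiple of the alignment before testing whether the size still fits; a small worked sketch of that arithmetic, with align_up() as an illustrative helper, not part of this file:

/* Illustrative only: the round-up used by check_free_hole(). */
static unsigned long align_up(unsigned long start, unsigned alignment)
{
	unsigned tmp = alignment ? start % alignment : 0;
	if (tmp)
		start += alignment - tmp;
	return start;
}
/* e.g. align_up(0x1004, 0x1000) == 0x2000, so a hole [0x1004, 0x3000) still
 * fits a 4 KiB request because 0x2000 + 0x1000 <= 0x3000. */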
 
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment, int best_match)
unsigned alignment,
unsigned long color,
bool best_match)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
367,10 → 379,17
best_size = ~0UL;
 
list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
unsigned long adj_start = drm_mm_hole_node_start(entry);
unsigned long adj_end = drm_mm_hole_node_end(entry);
 
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
continue;
}
 
BUG_ON(!entry->hole_follows);
if (!check_free_hole(drm_mm_hole_node_start(entry),
drm_mm_hole_node_end(entry),
size, alignment))
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
 
if (!best_match)
384,14 → 403,15
 
return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
EXPORT_SYMBOL(drm_mm_search_free_generic);
 
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
int best_match)
bool best_match)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
409,6 → 429,13
end : drm_mm_hole_node_end(entry);
 
BUG_ON(!entry->hole_follows);
 
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
continue;
}
 
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
 
423,7 → 450,7
 
return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
 
/**
* Moves an allocation. To be used with embedded struct drm_mm_node.
436,6 → 463,7
new->mm = old->mm;
new->start = old->start;
new->size = old->size;
new->color = old->color;
 
old->allocated = 0;
new->allocated = 1;
451,9 → 479,12
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
unsigned alignment)
void drm_mm_init_scan(struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color)
{
mm->scan_color = color;
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
473,11 → 504,14
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
void drm_mm_init_scan_with_range(struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end)
{
mm->scan_color = color;
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
521,17 → 555,21
 
hole_start = drm_mm_hole_node_start(prev_node);
hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
adj_start = hole_start < mm->scan_start ?
mm->scan_start : hole_start;
adj_end = hole_end > mm->scan_end ?
mm->scan_end : hole_end;
} else {
 
adj_start = hole_start;
adj_end = hole_end;
 
if (mm->color_adjust)
mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
 
if (mm->scan_check_range) {
if (adj_start < mm->scan_start)
adj_start = mm->scan_start;
if (adj_end > mm->scan_end)
adj_end = mm->scan_end;
}
 
if (check_free_hole(adj_start , adj_end,
if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = hole_start;
mm->scan_hit_size = hole_end;
615,6 → 653,8
mm->head_node.size = start - mm->head_node.start;
list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
 
mm->color_adjust = NULL;
 
return 0;
}
EXPORT_SYMBOL(drm_mm_init);
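drm_mm_init() leaves mm->color_adjust NULL; when a driver installs one, the callback may narrow the usable [start, end) range of a hole for a given request color, and the insert/search/scan paths above call it before the alignment check. A hedged sketch of such a callback; the guard-page policy is invented for illustration, only the callback shape matches what drm_mm expects:

/* Illustrative color_adjust callback: keep a page of slack after a node
 * of a different color (e.g. a different caching domain). */
static void example_color_adjust(struct drm_mm_node *node, unsigned long color,
				 unsigned long *start, unsigned long *end)
{
	if (node->color != color)
		*start += 4096;	/* leave a guard page after the previous node */
}
/* installed once after drm_mm_init(): mm->color_adjust = example_color_adjust; */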
/drivers/video/drm/drm_modes.c
32,9 → 32,9
 
#include <linux/list.h>
#include <linux/list_sort.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
 
/**
* drm_mode_debug_printmodeline - debug print a mode
685,8 → 685,6
p->crtc_vsync_end /= 2;
p->crtc_vtotal /= 2;
}
 
p->crtc_vtotal |= 1;
}
 
if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
707,14 → 705,32
p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
 
p->crtc_hadjusted = false;
p->crtc_vadjusted = false;
}
EXPORT_SYMBOL(drm_mode_set_crtcinfo);
 
 
/**
* drm_mode_copy - copy the mode
* @dst: mode to overwrite
* @src: mode to copy
*
* LOCKING:
* None.
*
* Copy an existing mode into another mode, preserving the object id
* of the destination mode.
*/
void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
{
int id = dst->base.id;
 
*dst = *src;
dst->base.id = id;
INIT_LIST_HEAD(&dst->head);
}
EXPORT_SYMBOL(drm_mode_copy);
 
/**
* drm_mode_duplicate - allocate and duplicate an existing mode
* @m: mode to duplicate
*
728,16 → 744,13
const struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
int new_id;
 
nmode = drm_mode_create(dev);
if (!nmode)
return NULL;
 
new_id = nmode->base.id;
*nmode = *mode;
nmode->base.id = new_id;
INIT_LIST_HEAD(&nmode->head);
drm_mode_copy(nmode, mode);
 
return nmode;
}
EXPORT_SYMBOL(drm_mode_duplicate);
/drivers/video/drm/drm_pci.c
0,0 → 1,140
/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
* \file drm_pci.c
* \brief Functions and ioctls to manage PCI memory
*
* \warning These interfaces aren't stable yet.
*
* \todo Implement the remaining ioctl's for the PCI pools.
* \todo The wrappers here are so thin that they would be better off inlined.
*
* \author José Fonseca <jrfonseca@tungstengraphics.com>
* \author Leif Delgass <ldelgass@retinalburn.net>
*/
 
/*
* Copyright 2003 José Fonseca.
* Copyright 2003 Leif Delgass.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
 
//#include <linux/pci.h>
//#include <linux/slab.h>
//#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>
 
#include <syscall.h>
 
/**********************************************************************/
/** \name PCI memory */
/*@{*/
 
/**
* \brief Allocate a PCI consistent memory block, for DMA.
*/
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
#if 1
unsigned long addr;
size_t sz;
#endif
 
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
* Return NULL here for now to make sure nobody tries for larger alignment
*/
if (align > size)
return NULL;
 
dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
if (!dmah)
return NULL;
 
dmah->size = size;
dmah->vaddr = (void*)KernelAlloc(size);
dmah->busaddr = GetPgAddr(dmah->vaddr);
 
if (dmah->vaddr == NULL) {
kfree(dmah);
return NULL;
}
 
memset(dmah->vaddr, 0, size);
 
return dmah;
}
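drm_pci_alloc() returns a handle whose vaddr is the CPU mapping (already zeroed) and whose busaddr is the address to hand to the device; a hedged usage sketch, with example_alloc_ring() and program_hw_ring_base() as placeholders:

/* Illustrative usage only; program_hw_ring_base() stands in for driver code. */
static int example_alloc_ring(struct drm_device *dev)
{
	drm_dma_handle_t *dmah = drm_pci_alloc(dev, 4096, 4096);
	if (!dmah)
		return -ENOMEM;
	/* dmah->vaddr   : CPU-visible mapping, zeroed by drm_pci_alloc()
	 * dmah->busaddr : physical/bus address for the device            */
	program_hw_ring_base(dev, dmah->busaddr);
	return 0;
}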
 
 
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
int pos;
u32 lnkcap, lnkcap2;
 
*mask = 0;
if (!dev->pdev)
return -EINVAL;
 
if (!pci_is_pcie(dev->pdev))
return -EINVAL;
 
return -EINVAL;
 
#if 0
root = dev->pdev->bus->self;
 
pos = pci_pcie_cap(root);
if (!pos)
return -EINVAL;
 
/* we've been informed that VIA and ServerWorks don't make the cut */
// if (root->vendor == PCI_VENDOR_ID_VIA ||
// root->vendor == PCI_VENDOR_ID_SERVERWORKS)
// return -EINVAL;
 
pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap);
pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2);
 
lnkcap &= PCI_EXP_LNKCAP_SLS;
lnkcap2 &= 0xfe;
 
if (lnkcap2) { /* PCIE GEN 3.0 */
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
*mask |= DRM_PCIE_SPEED_50;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
*mask |= DRM_PCIE_SPEED_80;
} else {
if (lnkcap & 1)
*mask |= DRM_PCIE_SPEED_25;
if (lnkcap & 2)
*mask |= DRM_PCIE_SPEED_50;
}
 
DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
return 0;
#endif
 
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
/drivers/video/drm/drm_stub.c
0,0 → 1,109
/**
* \file drm_stub.h
* Stub support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
*/
 
/*
* Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
*
* Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#include <linux/module.h>
 
#include <linux/slab.h>
#include <drm/drmP.h>
 
struct va_format {
const char *fmt;
va_list *va;
};
 
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
 
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);
 
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
 
int drm_err(const char *func, const char *format, ...)
{
struct va_format vaf;
va_list args;
int r;
 
va_start(args, format);
 
vaf.fmt = format;
vaf.va = &args;
 
r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
 
va_end(args);
 
return r;
}
EXPORT_SYMBOL(drm_err);
 
void drm_ut_debug_printk(unsigned int request_level,
const char *prefix,
const char *function_name,
const char *format, ...)
{
va_list args;
 
// if (drm_debug & request_level) {
// if (function_name)
// printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
// va_start(args, format);
// vprintk(format, args);
// va_end(args);
// }
}
EXPORT_SYMBOL(drm_ut_debug_printk);
 
/**
* Compute size order. Returns the exponent of the smallest power of two which
* is greater than or equal to the given number.
*
* \param size size.
* \return order.
*
* \todo Can be made faster.
*/
int drm_order(unsigned long size)
{
int order;
unsigned long tmp;
 
for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
 
if (size & (size - 1))
++order;
 
return order;
}
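A few sample values of drm_order(), as a quick sanity sketch rather than part of the file:

/* drm_order(1)    == 0
 * drm_order(2)    == 1
 * drm_order(3)    == 2    (3 is not a power of two, so round up)
 * drm_order(4096) == 12
 * drm_order(4097) == 13 */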
/drivers/video/drm/i2c/i2c-algo-bit.c
15,7 → 15,8
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301 USA.
* ------------------------------------------------------------------------- */
 
/* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki
24,11 → 25,13
#include <types.h>
#include <list.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <syscall.h>
#include <errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
 
#define I2C_FUNC_NOSTART 0x00000010 /* I2C_M_NOSTART */
 
/* ----- global defines ----------------------------------------------- */
 
40,13 → 43,19
} while (0)
#else
#define bit_dbg(level, dev, format, args...) \
do {} while (0)
do { /* dbgprintf(format, ##args); */ } while (0)
#endif /* DEBUG */
 
/* ----- global variables --------------------------------------------- */
 
static int bit_test; /* see if the line-setting functions work */
static int bit_test = 0; /* see if the line-setting functions work */
 
#ifdef DEBUG
static int i2c_debug = 1;
module_param(i2c_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(i2c_debug,
"debug level - 0 off; 1 normal; 2 verbose; 3 very verbose");
#endif
 
/* --- setting states on the bus with the right timing: --------------- */
 
87,7 → 96,7
if (!adap->getscl)
goto done;
 
// start = jiffies;
start = GetTimerTicks();
while (!getscl(adap)) {
/* This hw knows how to read the clock line, so we wait
* until it actually gets high. This is safer as some
94,19 → 103,16
* chips may hold it low ("clock stretching") while they
* are processing data internally.
*/
// if (time_after(jiffies, start + adap->timeout))
// return -ETIMEDOUT;
 
udelay(adap->udelay);
 
// cond_resched();
}
#ifdef DEBUG
if (jiffies != start && i2c_debug >= 3)
pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go "
"high\n", jiffies - start);
#endif
 
if (time_after(GetTimerTicks(), start + adap->timeout)) {
/* Test one last time, as we may have been preempted
* between last check and timeout test.
*/
if (getscl(adap))
break;
return -ETIMEDOUT;
}
udelay(1);
}
done:
udelay(adap->udelay);
return 0;
239,12 → 245,14
}
 
if (adap->getscl == NULL)
pr_info("%s: Testing SDA only, SCL is not readable\n", name);
dbgprintf("%s: Testing SDA only, SCL is not readable\n", name);
 
sda = getsda(adap);
scl = (adap->getscl == NULL) ? 1 : getscl(adap);
if (!scl || !sda) {
printk(KERN_WARNING "%s: bus seems to be busy\n", name);
printk(KERN_WARNING
"%s: bus seems to be busy (scl=%d, sda=%d)\n",
name, scl, sda);
goto bailout;
}
 
303,7 → 311,7
if (adap->post_xfer)
adap->post_xfer(i2c_adap);
 
pr_info("%s: Test OK\n", name);
dbgprintf("%s: Test OK\n", name);
return 0;
bailout:
sdahi(adap);
372,7 → 380,7
* the SMBus PEC was wrong.
*/
} else if (retval == 0) {
// dev_err(&i2c_adap->dev, "sendbytes: NAK bailout.\n");
dev_err(&i2c_adap->dev, "sendbytes: NAK bailout.\n");
return -EIO;
 
/* Timeout; or (someday) lost arbitration
383,8 → 391,8
* to know or care about this ... it is *NOT* an error.
*/
} else {
// dev_err(&i2c_adap->dev, "sendbytes: error %d\n",
// retval);
dev_err(&i2c_adap->dev, "sendbytes: error %d\n",
retval);
return retval;
}
}
400,8 → 408,8
setsda(adap, 0);
udelay((adap->udelay + 1) / 2);
if (sclhi(adap) < 0) { /* timeout */
// dev_err(&i2c_adap->dev, "readbytes: ack/nak timeout\n");
// return -ETIMEDOUT;
dev_err(&i2c_adap->dev, "readbytes: ack/nak timeout\n");
return -ETIMEDOUT;
}
scllo(adap);
return 0;
433,9 → 441,9
if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) {
if (!(flags & I2C_M_NO_RD_ACK))
acknak(i2c_adap, 0);
// dev_err(&i2c_adap->dev, "readbytes: invalid "
// "block length (%d)\n", inval);
return -EREMOTEIO;
dev_err(&i2c_adap->dev, "readbytes: invalid "
"block length (%d)\n", inval);
return -EPROTO;
}
/* The original count value accounts for the extra
bytes, that is, either 1 for a regular transaction,
464,7 → 472,7
* reads, writes as well as 10bit-addresses.
* returns:
* 0 everything went okay, the chip ack'ed, or IGNORE_NAK flag was set
* -x an error occurred (like: -EREMOTEIO if the device did not answer, or
* -x an error occurred (like: -ENXIO if the device did not answer, or
* -ETIMEDOUT, for example if the lines are stuck...)
*/
static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
480,21 → 488,21
 
if (flags & I2C_M_TEN) {
/* a ten bit address */
addr = 0xf0 | ((msg->addr >> 7) & 0x03);
addr = 0xf0 | ((msg->addr >> 7) & 0x06);
bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
/* try extended address code...*/
ret = try_address(i2c_adap, addr, retries);
if ((ret != 1) && !nak_ok) {
// dev_err(&i2c_adap->dev,
// "died at extended address code\n");
return -EREMOTEIO;
dev_err(&i2c_adap->dev,
"died at extended address code\n");
return -ENXIO;
}
/* the remaining 8 bit address */
ret = i2c_outb(i2c_adap, msg->addr & 0x7f);
ret = i2c_outb(i2c_adap, msg->addr & 0xff);
if ((ret != 1) && !nak_ok) {
/* the chip did not ack / xmission error occurred */
// dev_err(&i2c_adap->dev, "died at 2nd address code\n");
return -EREMOTEIO;
dev_err(&i2c_adap->dev, "died at 2nd address code\n");
return -ENXIO;
}
if (flags & I2C_M_RD) {
bit_dbg(3, &i2c_adap->dev, "emitting repeated "
504,9 → 512,9
addr |= 0x01;
ret = try_address(i2c_adap, addr, retries);
if ((ret != 1) && !nak_ok) {
// dev_err(&i2c_adap->dev,
// "died at repeated address code\n");
return -EREMOTEIO;
dev_err(&i2c_adap->dev,
"died at repeated address code\n");
return -EIO;
}
}
} else { /* normal 7bit address */
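For the 10-bit path above, the first wire byte is 1111 0 A9 A8 R/W and the second carries A7..A0, which is why the masks are 0x06 on (addr >> 7) and 0xff on the low byte; a small illustrative sketch (encode_10bit_addr() is not part of the driver):

/* Illustrative only: the two wire bytes for a 10-bit address, write direction. */
static void encode_10bit_addr(unsigned short addr, unsigned char *b0, unsigned char *b1)
{
	*b0 = 0xf0 | ((addr >> 7) & 0x06);	/* 1111 0 A9 A8 0 (R/W = write) */
	*b1 = addr & 0xff;			/* A7..A0 */
}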
531,8 → 539,7
int i, ret;
unsigned short nak_ok;
 
//ENTER();
if (adap->pre_xfer) {
if (adap->pre_xfer) {
ret = adap->pre_xfer(i2c_adap);
if (ret < 0)
return ret;
565,7 → 572,7
ret, ret == 1 ? "" : "s");
if (ret < pmsg->len) {
if (ret >= 0)
ret = -EREMOTEIO;
ret = -EIO;
goto bailout;
}
} else {
576,7 → 583,7
ret, ret == 1 ? "" : "s");
if (ret < pmsg->len) {
if (ret >= 0)
ret = -EREMOTEIO;
ret = -EIO;
goto bailout;
}
}
586,7 → 593,6
bailout:
bit_dbg(3, &i2c_adap->dev, "emitting stop condition\n");
i2c_stop(adap);
// LEAVE();
 
if (adap->post_xfer)
adap->post_xfer(i2c_adap);
595,7 → 601,7
 
static u32 bit_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
604,10 → 610,11
 
/* -----exported algorithm data: ------------------------------------- */
 
static const struct i2c_algorithm i2c_bit_algo = {
const struct i2c_algorithm i2c_bit_algo = {
.master_xfer = bit_xfer,
.functionality = bit_func,
};
EXPORT_SYMBOL(i2c_bit_algo);
 
/*
* registering functions to load algorithms at runtime
620,7 → 627,7
 
if (bit_test) {
ret = test_bus(adap);
if (ret < 0)
if (bit_test >= 2 && ret < 0)
return -ENODEV;
}
 
628,6 → 635,11
adap->algo = &i2c_bit_algo;
adap->retries = 3;
 
/* Complain if SCL can't be read */
if (bit_adap->getscl == NULL) {
dev_warn(&adap->dev, "Not I2C compliant: can't read SCL\n");
dev_warn(&adap->dev, "Bus may be unreliable\n");
}
return 0;
}
 
/drivers/video/drm/i2c/i2c-core.c
14,15 → 14,20
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301 USA. */
/* ------------------------------------------------------------------------- */
 
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>.
All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl>
SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and
Jean Delvare <khali@linux-fr.org> */
Jean Delvare <khali@linux-fr.org>
Mux support by Rodolfo Giometti <giometti@enneenne.com> and
Michael Lawnick <michael.lawnick.ext@nsn.com> */
 
#include <types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <list.h>
#include <errno.h>
#include <linux/i2c.h>
29,7 → 34,254
#include <syscall.h>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#if 0
 
static ssize_t
show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
}
 
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
 
static struct attribute *i2c_dev_attrs[] = {
&dev_attr_name.attr,
/* modalias helps coldplug: modprobe $(cat .../modalias) */
&dev_attr_modalias.attr,
NULL
};
 
static struct attribute_group i2c_dev_attr_group = {
.attrs = i2c_dev_attrs,
};
 
static const struct attribute_group *i2c_dev_attr_groups[] = {
&i2c_dev_attr_group,
NULL
};
 
static const struct dev_pm_ops i2c_device_pm_ops = {
.suspend = i2c_device_pm_suspend,
.resume = i2c_device_pm_resume,
.freeze = i2c_device_pm_freeze,
.thaw = i2c_device_pm_thaw,
.poweroff = i2c_device_pm_poweroff,
.restore = i2c_device_pm_restore,
SET_RUNTIME_PM_OPS(
pm_generic_runtime_suspend,
pm_generic_runtime_resume,
pm_generic_runtime_idle
)
};
 
struct bus_type i2c_bus_type = {
.name = "i2c",
.match = i2c_device_match,
.probe = i2c_device_probe,
.remove = i2c_device_remove,
.shutdown = i2c_device_shutdown,
.pm = &i2c_device_pm_ops,
};
EXPORT_SYMBOL_GPL(i2c_bus_type);
 
static struct device_type i2c_client_type = {
.groups = i2c_dev_attr_groups,
.uevent = i2c_device_uevent,
.release = i2c_client_dev_release,
};
 
 
/**
* i2c_verify_client - return parameter as i2c_client, or NULL
* @dev: device, probably from some driver model iterator
*
* When traversing the driver model tree, perhaps using driver model
* iterators like @device_for_each_child(), you can't assume very much
* about the nodes you find. Use this function to avoid oopses caused
* by wrongly treating some non-I2C device as an i2c_client.
*/
struct i2c_client *i2c_verify_client(struct device *dev)
{
return (dev->type == &i2c_client_type)
? to_i2c_client(dev)
: NULL;
}
EXPORT_SYMBOL(i2c_verify_client);
 
 
/* This is a permissive address validity check, I2C address map constraints
* are purposely not enforced, except for the general call address. */
static int i2c_check_client_addr_validity(const struct i2c_client *client)
{
if (client->flags & I2C_CLIENT_TEN) {
/* 10-bit address, all values are valid */
if (client->addr > 0x3ff)
return -EINVAL;
} else {
/* 7-bit address, reject the general call address */
if (client->addr == 0x00 || client->addr > 0x7f)
return -EINVAL;
}
return 0;
}
 
/* And this is a strict address validity check, used when probing. If a
* device uses a reserved address, then it shouldn't be probed. 7-bit
* addressing is assumed, 10-bit address devices are rare and should be
* explicitly enumerated. */
static int i2c_check_addr_validity(unsigned short addr)
{
/*
* Reserved addresses per I2C specification:
* 0x00 General call address / START byte
* 0x01 CBUS address
* 0x02 Reserved for different bus format
* 0x03 Reserved for future purposes
* 0x04-0x07 Hs-mode master code
* 0x78-0x7b 10-bit slave addressing
* 0x7c-0x7f Reserved for future purposes
*/
if (addr < 0x08 || addr > 0x77)
return -EINVAL;
return 0;
}
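Given the reserved ranges listed above, the strict check accepts only 7-bit addresses in 0x08..0x77; a few illustrative values:

/* i2c_check_addr_validity(0x03) == -EINVAL  (reserved)
 * i2c_check_addr_validity(0x50) == 0        (typical EEPROM address)
 * i2c_check_addr_validity(0x78) == -EINVAL  (10-bit addressing range) */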
 
static int __i2c_check_addr_busy(struct device *dev, void *addrp)
{
struct i2c_client *client = i2c_verify_client(dev);
int addr = *(int *)addrp;
 
if (client && client->addr == addr)
return -EBUSY;
return 0;
}
 
/* walk up mux tree */
static int i2c_check_mux_parents(struct i2c_adapter *adapter, int addr)
{
struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
int result;
 
result = device_for_each_child(&adapter->dev, &addr,
__i2c_check_addr_busy);
 
if (!result && parent)
result = i2c_check_mux_parents(parent, addr);
 
return result;
}
 
/* recurse down mux tree */
static int i2c_check_mux_children(struct device *dev, void *addrp)
{
int result;
 
if (dev->type == &i2c_adapter_type)
result = device_for_each_child(dev, addrp,
i2c_check_mux_children);
else
result = __i2c_check_addr_busy(dev, addrp);
 
return result;
}
 
static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
{
struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
int result = 0;
 
if (parent)
result = i2c_check_mux_parents(parent, addr);
 
if (!result)
result = device_for_each_child(&adapter->dev, &addr,
i2c_check_mux_children);
 
return result;
}
 
/**
* i2c_lock_adapter - Get exclusive access to an I2C bus segment
* @adapter: Target I2C bus segment
*/
void i2c_lock_adapter(struct i2c_adapter *adapter)
{
struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
 
if (parent)
i2c_lock_adapter(parent);
else
rt_mutex_lock(&adapter->bus_lock);
}
EXPORT_SYMBOL_GPL(i2c_lock_adapter);
 
/**
* i2c_trylock_adapter - Try to get exclusive access to an I2C bus segment
* @adapter: Target I2C bus segment
*/
static int i2c_trylock_adapter(struct i2c_adapter *adapter)
{
struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
 
if (parent)
return i2c_trylock_adapter(parent);
else
return rt_mutex_trylock(&adapter->bus_lock);
}
 
/**
* i2c_unlock_adapter - Release exclusive access to an I2C bus segment
* @adapter: Target I2C bus segment
*/
void i2c_unlock_adapter(struct i2c_adapter *adapter)
{
struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
 
if (parent)
i2c_unlock_adapter(parent);
else
rt_mutex_unlock(&adapter->bus_lock);
}
EXPORT_SYMBOL_GPL(i2c_unlock_adapter);
 
#endif
 
 
/**
* i2c_transfer - execute a single or combined I2C message
* @adap: Handle to I2C bus
* @msgs: One or more messages to execute before STOP is issued to
65,22 → 317,24
 
if (adap->algo->master_xfer) {
 
/* Retry automatically on arbitration loss */
orig_jiffies = GetTimerTicks();
 
/* Retry automatically on arbitration loss */
orig_jiffies = 0;
for (ret = 0, try = 0; try <= adap->retries; try++) {
 
ret = adap->algo->master_xfer(adap, msgs, num);
if (ret != -EAGAIN)
break;
// if (time_after(jiffies, orig_jiffies + adap->timeout))
// break;
delay(1);
 
if (time_after(GetTimerTicks(), orig_jiffies + adap->timeout))
break;
 
delay(1);
}
// mutex_unlock(&adap->bus_lock);
return ret;
} else {
// dev_dbg(&adap->dev, "I2C level transfers not supported\n");
dbgprintf("I2C level transfers not supported\n");
return -EOPNOTSUPP;
}
}
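A hedged usage sketch of i2c_transfer() for the common write-then-read register access pattern; example_read_reg() and the 0x50/0x00 values are illustrative only:

/* Illustrative only: read one byte from register 0x00 of a device at 0x50. */
static int example_read_reg(struct i2c_adapter *adap, u8 *out)
{
	u8 reg = 0x00;
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = out  },
	};

	/* i2c_transfer() returns the number of messages transferred, i.e. 2 on success */
	return i2c_transfer(adap, msgs, 2) == 2 ? 0 : -EIO;
}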
/drivers/video/drm/i915/i915_drm.h
File deleted
/drivers/video/drm/i915/Gtt/intel-gtt.h
File deleted
/drivers/video/drm/i915/Gtt/agp.h
37,8 → 37,8
SUPPORTED,
};
 
struct agp_memory;
 
 
#define PFX "agpgart: "
 
//#define AGP_DEBUG 1
/drivers/video/drm/i915/Gtt/intel-agp.c
109,15 → 109,10
ID(PCI_DEVICE_ID_INTEL_B43_HB),
ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
{ }
};
 
/drivers/video/drm/i915/Gtt/intel-agp.h
64,6 → 64,7
#define I830_PTE_SYSTEM_CACHED 0x00000006
/* GT PTE cache control fields */
#define GEN6_PTE_UNCACHED 0x00000002
#define HSW_PTE_UNCACHED 0x00000000
#define GEN6_PTE_LLC 0x00000004
#define GEN6_PTE_LLC_MLC 0x00000006
#define GEN6_PTE_GFDT 0x00000008
96,6 → 97,7
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
 
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
#define GFX_FLSH_CNTL_VLV 0x101008
 
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
211,6 → 213,7
#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
234,8 → 237,48
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
 
int intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge);
void intel_gmch_remove(struct pci_dev *pdev);
#endif
/drivers/video/drm/i915/Gtt/intel-gtt.c
19,6 → 19,7
#include <errno-base.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/export.h>
//#include <linux/pagemap.h>
//#include <linux/agp_backend.h>
//#include <asm/smp.h>
32,9 → 33,7
struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);
 
static bool intel_enable_gtt(void);
 
 
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560
51,27 → 50,7
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
 
 
static inline int pci_read_config_word(struct pci_dev *dev, int where,
u16 *val)
{
*val = PciRead16(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
u32 *val)
{
*val = PciRead32(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_write_config_word(struct pci_dev *dev, int where,
u16 val)
{
PciWrite16(dev->busnr, dev->devfn, where, val);
return 1;
}
 
/*
* If we have Intel graphics, we're not going to have anything other than
* an Intel IOMMU. So make the correct use of the PCI DMA API contingent
111,7 → 90,6
struct pci_dev *bridge_dev;
u8 __iomem *registers;
phys_addr_t gtt_bus_addr;
phys_addr_t gma_bus_addr;
u32 PGETBL_save;
u32 __iomem *gtt; /* I915G */
bool clear_fake_agp; /* on first access via agp, fill with scratch */
121,7 → 99,7
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
dma_addr_t scratch_page_dma;
int refcount;
} intel_private;
 
#define INTEL_GTT_GEN intel_private.driver->gen
132,13 → 110,13
 
static int intel_gtt_setup_scratch_page(void)
{
addr_t page;
dma_addr_t dma_addr;
 
page = AllocPage();
if (page == 0)
dma_addr = AllocPage();
if (dma_addr == 0)
return -ENOMEM;
 
intel_private.scratch_page_dma = page;
intel_private.base.scratch_page_dma = dma_addr;
intel_private.scratch_page = NULL;
 
return 0;
441,8 → 419,8
{
intel_private.driver->cleanup();
 
FreeKernelSpace(intel_private.gtt);
FreeKernelSpace(intel_private.registers);
iounmap(intel_private.gtt);
iounmap(intel_private.registers);
 
intel_gtt_teardown_scratch_page();
}
449,6 → 427,7
 
static int intel_gtt_init(void)
{
u32 gma_addr;
u32 gtt_map_size;
int ret;
 
480,13 → 459,19
 
gtt_map_size = intel_private.base.gtt_total_entries * 4;
 
intel_private.gtt = (u32*)MapIoMem(intel_private.gtt_bus_addr,
gtt_map_size, PG_SW+PG_NOCACHE);
if (!intel_private.gtt) {
intel_private.gtt = NULL;
// if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
// intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
// gtt_map_size);
if (intel_private.gtt == NULL)
intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
gtt_map_size);
if (intel_private.gtt == NULL) {
intel_private.driver->cleanup();
FreeKernelSpace(intel_private.registers);
iounmap(intel_private.registers);
return -ENOMEM;
}
intel_private.base.gtt = intel_private.gtt;
 
asm volatile("wbinvd");
 
500,8 → 485,15
return ret;
}
 
intel_enable_gtt();
if (INTEL_GTT_GEN <= 2)
pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
&gma_addr);
else
pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
&gma_addr);
 
intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
LEAVE();
 
return 0;
518,20 → 510,10
writel(addr | pte_flags, intel_private.gtt + entry);
}
 
static bool intel_enable_gtt(void)
bool intel_enable_gtt(void)
{
u32 gma_addr;
u8 __iomem *reg;
 
if (INTEL_GTT_GEN <= 2)
pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
&gma_addr);
else
pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
&gma_addr);
 
intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
if (INTEL_GTT_GEN >= 6)
return true;
 
588,19 → 570,38
return false;
}
 
void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
struct page **pages, unsigned int flags)
void intel_gtt_insert_sg_entries(struct pagelist *st,
unsigned int pg_start,
unsigned int flags)
{
int i, j;
 
j = pg_start;
 
for(i = 0; i < st->nents; i++)
{
dma_addr_t addr = st->page[i];
intel_private.driver->write_entry(addr, j, flags);
j++;
};
 
readl(intel_private.gtt+j-1);
}
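A minimal usage sketch for the helper above, assuming a hypothetical struct pagelist pl whose page[] array already holds the physical addresses to map; the flag mirrors the AGP_USER_MEMORY case handled by the write_entry hooks, and intel_gtt_clear_range() (further below) would undo the mapping:

/* Illustration only: bind pl->nents pages starting at GTT entry pg_start. */
static void example_bind(struct pagelist *pl, unsigned int pg_start)
{
/* uncached user memory; cached variants use AGP_USER_CACHED_MEMORY */
intel_gtt_insert_sg_entries(pl, pg_start, AGP_USER_MEMORY);
}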
 
static void intel_gtt_insert_pages(unsigned int first_entry,
unsigned int num_entries,
dma_addr_t *pages,
unsigned int flags)
{
int i, j;
 
for (i = 0, j = first_entry; i < num_entries; i++, j++) {
dma_addr_t addr = (dma_addr_t)(pages[i]);
dma_addr_t addr = pages[i];
intel_private.driver->write_entry(addr,
j, flags);
}
readl(intel_private.gtt+j-1);
}
EXPORT_SYMBOL(intel_gtt_insert_pages);
 
 
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
608,7 → 609,7
unsigned int i;
 
for (i = first_entry; i < (first_entry + num_entries); i++) {
intel_private.driver->write_entry(intel_private.scratch_page_dma,
intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
i, 0);
}
readl(intel_private.gtt+i-1);
679,6 → 680,30
return true;
}
 
static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
 
if (type_mask == AGP_USER_MEMORY)
pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
} else { /* set 'normal'/'cached' to LLC by default */
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
 
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
}
 
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
703,6 → 728,28
writel(addr | pte_flags, intel_private.gtt + entry);
}
 
static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
 
if (type_mask == AGP_USER_MEMORY)
pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
else {
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
 
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
 
writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
}
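The gen6, Haswell and ValleyView entry writers above all fold the high address bits into the low PTE word the same way; a hedged worked example, using an illustrative 40-bit address (and assuming a 64-bit dma_addr_t), of what addr |= (addr >> 28) & 0xff0 does:

/* Illustration only: pack a 40-bit address the way the write_entry hooks do. */
static u32 example_gen6_pte(dma_addr_t addr, u32 pte_flags)
{
/* e.g. addr = 0x2345678000: (addr >> 28) & 0xff0 == 0x230, so the 0x23
* from bits 39:32 ends up in PTE bits 11:4, next to the flag bits */
addr |= (addr >> 28) & 0xff0;
return (u32)(addr | pte_flags);
}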
 
static void gen6_cleanup(void)
{
}
714,7 → 761,6
{
#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;
extern int intel_iommu_gfx_mapped;
 
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
730,13 → 776,16
static int i9xx_setup(void)
{
u32 reg_addr;
int size = KB(512);
 
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
 
reg_addr &= 0xfff80000;
 
intel_private.registers = (u8*)MapIoMem(reg_addr, 128 * 4096, PG_SW+PG_NOCACHE);
if (INTEL_GTT_GEN >= 7)
size = MB(2);
 
intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
 
752,6 → 801,7
switch (INTEL_GTT_GEN) {
case 5:
case 6:
case 7:
gtt_offset = MB(2);
break;
case 4:
839,6 → 889,23
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver haswell_gtt_driver = {
.gen = 6,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = haswell_write_entry,
.dma_mask_size = 40,
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver valleyview_gtt_driver = {
.gen = 7,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = valleyview_write_entry,
.dma_mask_size = 40,
.check_flags = gen6_check_flags,
};
 
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
925,6 → 992,82
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
"ValleyView", &valleyview_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ 0, NULL, NULL }
};
 
945,7 → 1088,7
return 1;
}
 
int intel_gmch_probe(struct pci_dev *pdev,
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge)
{
int i, mask;
962,11 → 1105,12
if (!intel_private.driver)
return 0;
 
// bridge->driver = &intel_fake_agp_driver;
bridge->dev_private_data = &intel_private;
bridge->dev = pdev;
if (bridge) {
bridge->dev_private_data = &intel_private;
bridge->dev = bridge_pdev;
}
 
intel_private.bridge_dev = pdev;
intel_private.bridge_dev = bridge_pdev;
 
dbgprintf("Intel %s Chipset\n", intel_gtt_chipsets[i].name);
 
978,11 → 1122,11
// pci_set_consistent_dma_mask(intel_private.pcidev,
// DMA_BIT_MASK(mask));
 
/*if (bridge->driver == &intel_810_driver)
return 1;*/
if (intel_gtt_init() != 0) {
// intel_gmch_remove();
 
if (intel_gtt_init() != 0)
return 0;
}
 
return 1;
}
1002,7 → 1146,7
EXPORT_SYMBOL(intel_gtt_chipset_flush);
 
 
phys_addr_t get_bus_addr(void)
{
return intel_private.gma_bus_addr;
};
//phys_addr_t get_bus_addr(void)
//{
// return intel_private.gma_bus_addr;
//};
/drivers/video/drm/i915/dvo.h
24,9 → 24,8
#define _INTEL_DVO_H
 
#include <linux/i2c.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
 
struct intel_dvo_device {
58,13 → 57,12
void (*create_resources)(struct intel_dvo_device *dvo);
 
/*
* Turn on/off output or set intermediate power levels if available.
* Turn on/off output.
*
* Unsupported intermediate modes drop to the lower power setting.
* If the mode is DPMSModeOff, the output must be disabled,
* as the DPLL may be disabled afterwards.
* Because none of our dvo drivers support intermediate power levels,
* we don't expose this in the interface.
*/
void (*dpms)(struct intel_dvo_device *dvo, int mode);
void (*dpms)(struct intel_dvo_device *dvo, bool enable);
 
/*
* Callback for testing a video mode for a given output.
86,7 → 84,7
* buses with clock limitations.
*/
bool (*mode_fixup)(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
 
/*
115,6 → 113,12
*/
enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
 
/*
* Probe the current hw status, returning true if the connected output
* is active.
*/
bool (*get_hw_state)(struct intel_dvo_device *dev);
 
/**
* Query the device for the modes it provides.
*
140,5 → 144,6
extern struct intel_dvo_dev_ops ivch_ops;
extern struct intel_dvo_dev_ops tfp410_ops;
extern struct intel_dvo_dev_ops ch7017_ops;
extern struct intel_dvo_dev_ops ns2501_ops;
 
#endif /* _INTEL_DVO_H */
/drivers/video/drm/i915/dvo_ch7017.c
163,7 → 163,7
};
 
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
 
static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
{
309,7 → 309,7
lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
(mode->hdisplay & 0x0700) >> 8;
 
ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
ch7017_dpms(dvo, false);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
horizontal_active_pixel_input);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
331,7 → 331,7
}
 
/* set the CH7017 power state */
static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
{
uint8_t val;
 
345,7 → 345,7
CH7017_DAC3_POWER_DOWN |
CH7017_TV_POWER_DOWN_EN);
 
if (mode == DRM_MODE_DPMS_ON) {
if (enable) {
/* Turn on the LVDS */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
val & ~CH7017_LVDS_POWER_DOWN_EN);
359,6 → 359,18
msleep(20);
}
 
static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
{
uint8_t val;
 
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
 
if (val & CH7017_LVDS_POWER_DOWN_EN)
return false;
else
return true;
}
 
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
396,6 → 408,7
.mode_valid = ch7017_mode_valid,
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
.get_hw_state = ch7017_get_hw_state,
.dump_regs = ch7017_dump_regs,
.destroy = ch7017_destroy,
};
/drivers/video/drm/i915/dvo_ch7xxx.c
289,14 → 289,26
}
 
/* set the CH7xxx power state */
static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
{
if (mode == DRM_MODE_DPMS_ON)
if (enable)
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
else
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
}
 
static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
{
u8 val;
 
ch7xxx_readb(dvo, CH7xxx_PM, &val);
 
if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
return true;
else
return false;
}
 
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
int i;
326,6 → 338,7
.mode_valid = ch7xxx_mode_valid,
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
.get_hw_state = ch7xxx_get_hw_state,
.dump_regs = ch7xxx_dump_regs,
.destroy = ch7xxx_destroy,
};
/drivers/video/drm/i915/dvo_ivch.c
288,7 → 288,7
}
 
/** Sets the power state of the panel connected to the ivch */
static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
uint16_t vr01, vr30, backlight;
297,13 → 297,13
if (!ivch_read(dvo, VR01, &vr01))
return;
 
if (mode == DRM_MODE_DPMS_ON)
if (enable)
backlight = 1;
else
backlight = 0;
ivch_write(dvo, VR80, backlight);
 
if (mode == DRM_MODE_DPMS_ON)
if (enable)
vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
else
vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
315,7 → 315,7
if (!ivch_read(dvo, VR30, &vr30))
break;
 
if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
if (((vr30 & VR30_PANEL_ON) != 0) == enable)
break;
udelay(1000);
}
323,6 → 323,20
udelay(16 * 1000);
}
 
static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
{
uint16_t vr01;
 
/* Set the new power state of the panel. */
if (!ivch_read(dvo, VR01, &vr01))
return false;
 
if (vr01 & VR01_LCD_ENABLE)
return true;
else
return false;
}
 
static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
413,6 → 427,7
struct intel_dvo_dev_ops ivch_ops = {
.init = ivch_init,
.dpms = ivch_dpms,
.get_hw_state = ivch_get_hw_state,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,
/drivers/video/drm/i915/dvo_ns2501.c
0,0 → 1,588
/*
*
* Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
 
#include "dvo.h"
#include "i915_reg.h"
#include "i915_drv.h"
 
#define NS2501_VID 0x1305
#define NS2501_DID 0x6726
 
#define NS2501_VID_LO 0x00
#define NS2501_VID_HI 0x01
#define NS2501_DID_LO 0x02
#define NS2501_DID_HI 0x03
#define NS2501_REV 0x04
#define NS2501_RSVD 0x05
#define NS2501_FREQ_LO 0x06
#define NS2501_FREQ_HI 0x07
 
#define NS2501_REG8 0x08
#define NS2501_8_VEN (1<<5)
#define NS2501_8_HEN (1<<4)
#define NS2501_8_DSEL (1<<3)
#define NS2501_8_BPAS (1<<2)
#define NS2501_8_RSVD (1<<1)
#define NS2501_8_PD (1<<0)
 
#define NS2501_REG9 0x09
#define NS2501_9_VLOW (1<<7)
#define NS2501_9_MSEL_MASK (0x7<<4)
#define NS2501_9_TSEL (1<<3)
#define NS2501_9_RSEN (1<<2)
#define NS2501_9_RSVD (1<<1)
#define NS2501_9_MDI (1<<0)
 
#define NS2501_REGC 0x0c
 
struct ns2501_priv {
//I2CDevRec d;
bool quiet;
int reg_8_shadow;
int reg_8_set;
// Shadow registers for i915
int dvoc;
int pll_a;
int srcdim;
int fw_blc;
};
 
#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
 
/*
* For reasons unclear to me, the ns2501, at least on the Fujitsu/Siemens
* laptops, does not react on the i2c bus unless
* both the PLL is running and the display is configured in its native
* resolution.
* This function forces the DVO on, and stores the registers it touches.
* Afterwards, registers are restored to regular values.
*
* This is pretty much a hack, though it works.
* Without that, ns2501_readb and ns2501_writeb fail
* when switching the resolution.
*/
 
static void enable_dvo(struct intel_dvo_device *dvo)
{
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
struct i2c_adapter *adapter = dvo->i2c_bus;
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
 
DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
 
ns->dvoc = I915_READ(DVO_C);
ns->pll_a = I915_READ(_DPLL_A);
ns->srcdim = I915_READ(DVOC_SRCDIM);
ns->fw_blc = I915_READ(FW_BLC);
 
I915_WRITE(DVOC, 0x10004084);
I915_WRITE(_DPLL_A, 0xd0820000);
I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768
I915_WRITE(FW_BLC, 0x1080304);
 
I915_WRITE(DVOC, 0x90004084);
}
 
/*
* Restore the I915 registers modified by the above
* trigger function.
*/
static void restore_dvo(struct intel_dvo_device *dvo)
{
struct i2c_adapter *adapter = dvo->i2c_bus;
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 
I915_WRITE(DVOC, ns->dvoc);
I915_WRITE(_DPLL_A, ns->pll_a);
I915_WRITE(DVOC_SRCDIM, ns->srcdim);
I915_WRITE(FW_BLC, ns->fw_blc);
}
 
/*
** Read a register from the ns2501.
** Returns true if successful, false otherwise.
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
u8 in_buf[2];
 
struct i2c_msg msgs[] = {
{
.addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
}
};
 
out_buf[0] = addr;
out_buf[1] = 0;
 
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
};
 
if (!ns->quiet) {
DRM_DEBUG_KMS
("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
adapter->name, dvo->slave_addr);
}
 
return false;
}
 
/*
** Write a register to the ns2501.
** Returns true if successful, false otherwise.
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
uint8_t out_buf[2];
 
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
};
 
out_buf[0] = addr;
out_buf[1] = ch;
 
if (i2c_transfer(adapter, &msg, 1) == 1) {
return true;
}
 
if (!ns->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
addr, adapter->name, dvo->slave_addr);
}
 
return false;
}
 
/* National Semiconductor 2501 driver for a chip on the i2c bus:
* scan for the chip on the bus.
* Hope the VBIOS initialized the PLL correctly so we can
* talk to it. If not, it will not be seen and not detected.
* Bummer!
*/
static bool ns2501_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
/* this will detect the NS2501 chip on the specified i2c bus */
struct ns2501_priv *ns;
unsigned char ch;
 
ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
if (ns == NULL)
return false;
 
dvo->i2c_bus = adapter;
dvo->dev_priv = ns;
ns->quiet = true;
 
if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
goto out;
 
if (ch != (NS2501_VID & 0xff)) {
DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
 
if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
goto out;
 
if (ch != (NS2501_DID & 0xff)) {
DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
ns->quiet = false;
ns->reg_8_set = 0;
ns->reg_8_shadow =
NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
 
DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
return true;
 
out:
kfree(ns);
return false;
}
 
static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
{
/*
* This is a laptop display; it doesn't have hotplugging.
* Even so, the detection bit of the 2501 is unreliable as
* it only works for some display types.
* It is even more unreliable as the PLL must be active to
* allow reading from the chip.
*/
return connector_status_connected;
}
 
static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
DRM_DEBUG_KMS
("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
__FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
mode->vtotal);
 
/*
* Currently, these are all the modes I have data for.
* More might exist. It is unclear how to find the native resolution
* of the panel from here; if we could, we could always accept it
* by disabling the scaler.
*/
if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
(mode->hdisplay == 640 && mode->vdisplay == 480) ||
(mode->hdisplay == 1024 && mode->vdisplay == 768)) {
return MODE_OK;
} else {
return MODE_ONE_SIZE; /* Is this a reasonable error? */
}
}
 
static void ns2501_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
bool ok;
bool restore = false;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 
DRM_DEBUG_KMS
("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
__FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
mode->vtotal);
 
/*
* Where do I find the native resolution for which scaling is not required?
*
* First trigger the DVO on, as otherwise the chip does not appear on the i2c
* bus.
*/
do {
ok = true;
 
if (mode->hdisplay == 800 && mode->vdisplay == 600) {
/* mode 277 */
ns->reg_8_shadow &= ~NS2501_8_BPAS;
DRM_DEBUG_KMS("%s: switching to 800x600\n",
__FUNCTION__);
 
/*
* No, I do not know where this data comes from.
* It is just what the video BIOS left in the DVO, so
* I'm just copying it over here.
* This also means that I cannot support any other modes
* except the ones supported by the BIOS.
*/
ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
ok &= ns2501_writeb(dvo, 0x1b, 0x19);
ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
ok &= ns2501_writeb(dvo, 0x1d, 0x02);
 
ok &= ns2501_writeb(dvo, 0x34, 0x03);
ok &= ns2501_writeb(dvo, 0x35, 0xff);
 
ok &= ns2501_writeb(dvo, 0x80, 0x27);
ok &= ns2501_writeb(dvo, 0x81, 0x03);
ok &= ns2501_writeb(dvo, 0x82, 0x41);
ok &= ns2501_writeb(dvo, 0x83, 0x05);
 
ok &= ns2501_writeb(dvo, 0x8d, 0x02);
ok &= ns2501_writeb(dvo, 0x8e, 0x04);
ok &= ns2501_writeb(dvo, 0x8f, 0x00);
 
ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
ok &= ns2501_writeb(dvo, 0x91, 0x07);
ok &= ns2501_writeb(dvo, 0x94, 0x00);
ok &= ns2501_writeb(dvo, 0x95, 0x00);