Subversion Repositories Kolibri OS

Compare Revisions: Rev 3030 → Rev 3031
/drivers/ddk/Makefile
29,6 → 29,7
linux/dmapool.c \
linux/ctype.c \
linux/string.c \
linux/time.c \
malloc/malloc.c \
stdio/vsprintf.c \
string/_memmove.S \
/drivers/ddk/core.S
35,6 → 35,8
 
.global _MapIoMem
 
.global _MapPage
 
.global _MutexInit
.global _MutexLock
.global _MutexUnlock
95,6 → 97,8
 
.def _MapIoMem; .scl 2; .type 32; .endef
 
.def _MapPage; .scl 2; .type 32; .endef
 
.def _MutexInit; .scl 2; .type 32; .endef
.def _MutexLock; .scl 2; .type 32; .endef
.def _MutexUnlock; .scl 2; .type 32; .endef
155,6 → 159,8
 
_MapIoMem:
 
_MapPage:
 
_MutexInit:
_MutexLock:
_MutexUnlock:
219,6 → 225,7
.ascii " -export:KernelFree" # stdcall
 
.ascii " -export:MapIoMem" # stdcall
.ascii " -export:MapPage" # stdcall
 
.ascii " -export:MutexInit" # fastcall
.ascii " -export:MutexLock" # fastcall
/drivers/ddk/linux/time.c
0,0 → 1,148
#include <jiffies.h>
 
 
 
#define HZ_TO_MSEC_MUL32 0xA0000000
#define HZ_TO_MSEC_ADJ32 0x0
#define HZ_TO_MSEC_SHR32 28
#define HZ_TO_MSEC_MUL64 0xA000000000000000
#define HZ_TO_MSEC_ADJ64 0x0
#define HZ_TO_MSEC_SHR64 60
#define MSEC_TO_HZ_MUL32 0xCCCCCCCD
#define MSEC_TO_HZ_ADJ32 0x733333333
#define MSEC_TO_HZ_SHR32 35
#define MSEC_TO_HZ_MUL64 0xCCCCCCCCCCCCCCCD
#define MSEC_TO_HZ_ADJ64 0x73333333333333333
#define MSEC_TO_HZ_SHR64 67
#define HZ_TO_MSEC_NUM 10
#define HZ_TO_MSEC_DEN 1
#define MSEC_TO_HZ_NUM 1
#define MSEC_TO_HZ_DEN 10
 
#define HZ_TO_USEC_MUL32 0x9C400000
#define HZ_TO_USEC_ADJ32 0x0
#define HZ_TO_USEC_SHR32 18
#define HZ_TO_USEC_MUL64 0x9C40000000000000
#define HZ_TO_USEC_ADJ64 0x0
#define HZ_TO_USEC_SHR64 50
#define USEC_TO_HZ_MUL32 0xD1B71759
#define USEC_TO_HZ_ADJ32 0x1FFF2E48E8A7
#define USEC_TO_HZ_SHR32 45
#define USEC_TO_HZ_MUL64 0xD1B71758E219652C
#define USEC_TO_HZ_ADJ64 0x1FFF2E48E8A71DE69AD4
#define USEC_TO_HZ_SHR64 77
#define HZ_TO_USEC_NUM 10000
#define HZ_TO_USEC_DEN 1
#define USEC_TO_HZ_NUM 1
#define USEC_TO_HZ_DEN 10000
 
 
#define MSEC_PER_SEC 1000L
#define USEC_PER_MSEC 1000L
#define NSEC_PER_USEC 1000L
#define NSEC_PER_MSEC 1000000L
#define USEC_PER_SEC 1000000L
#define NSEC_PER_SEC 1000000000L
#define FSEC_PER_SEC 1000000000000000LL
 
 
unsigned int jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
# else
return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
# endif
#endif
}
 
unsigned int jiffies_to_usecs(const unsigned long j)
{
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
return (USEC_PER_SEC / HZ) * j;
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
 
 
/*
* When we convert to jiffies then we interpret incoming values
* the following way:
*
* - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
*
* - 'too large' values [that would result in larger than
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
*
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it by a factor
*
* We must also be careful about 32-bit overflows.
*/
unsigned long msecs_to_jiffies(const unsigned int m)
{
/*
* A negative value means an infinite timeout:
*/
if ((int)m < 0)
return MAX_JIFFY_OFFSET;
 
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
/*
* HZ is equal to or smaller than 1000, and 1000 is a nice
* round multiple of HZ, divide with the factor between them,
* but round upwards:
*/
return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
/*
* HZ is larger than 1000, and HZ is a nice round multiple of
* 1000 - simply multiply with the factor between them.
*
* But first make sure the multiplication result cannot
* overflow:
*/
if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
 
return m * (HZ / MSEC_PER_SEC);
#else
/*
* Generic case - multiply, round and divide. But first
* check that, if we are doing a net multiplication, we
* wouldn't overflow:
*/
if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
 
return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
>> MSEC_TO_HZ_SHR32;
#endif
}
 
unsigned long usecs_to_jiffies(const unsigned int u)
{
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
return u * (HZ / USEC_PER_SEC);
#else
return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
>> USEC_TO_HZ_SHR32;
#endif
}
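For concreteness, here is a minimal standalone sketch of the arithmetic these helpers perform. HZ = 100 (one jiffy = 10 ms) is implied by HZ_TO_MSEC_NUM = 10 above; the last assert checks that the 32-bit reciprocal-multiply constant encodes the same factor.

#include <assert.h>

#define HZ           100
#define MSEC_PER_SEC 1000L

int main(void)
{
    /* jiffies_to_msecs(): 1000/100 = 10 ms per jiffy */
    unsigned int msecs = (MSEC_PER_SEC / HZ) * 3;       /* 3 jiffies -> 30 ms */
    assert(msecs == 30);

    /* msecs_to_jiffies(): divide by 10, rounding upwards */
    unsigned long j = (25 + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
    assert(j == 3);                                     /* 25 ms -> 3 jiffies */

    /* 0xA0000000 / 2^28 == 10 exactly, so the MUL32/SHR32 branch
     * agrees with the plain division above. */
    assert(((0xA0000000ULL * 3) >> 28) == 30);
    return 0;
}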
 
/drivers/ddk/stdio/vsprintf.c
24,6 → 24,7
#include <linux/kernel.h>
#include <errno-base.h>
#include <linux/ioport.h>
#include <linux/export.h>
 
#include <asm/div64.h>
 
/drivers/include/errno-base.h
File deleted
/drivers/include/ddk.h
4,7 → 4,10
#define __DDK_H__
 
#include <kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <mutex.h>
#include <linux/pci.h>
 
 
#define OS_BASE 0x80000000
56,7 → 59,24
u32_t drvEntry(int, char *)__asm__("_drvEntry");
 
 
#define __WARN() dbgprintf(__FILE__, __LINE__)
 
#ifndef WARN_ON
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN(); \
unlikely(__ret_warn_on); \
})
#endif
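A sketch of a typical WARN_ON call site; the function and type below are hypothetical, for illustration only.

/* Hypothetical usage: log file and line via __WARN(), then bail out. */
static int submit_buffer(struct buffer *buf)
{
    if (WARN_ON(buf == NULL))
        return -EINVAL;
    /* ... normal path ... */
    return 0;
}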
 
 
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
// if (size != 0 && n > SIZE_MAX / size)
// return NULL;
return kmalloc(n * size, flags);
}
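The overflow guard above is commented out in this port. For reference, a sketch of the checked variant as upstream Linux keeps it, assuming SIZE_MAX is visible in this header:

static inline void *kmalloc_array_checked(size_t n, size_t size, gfp_t flags)
{
    /* refuse the request if n * size would wrap around */
    if (size != 0 && n > SIZE_MAX / size)
        return NULL;
    return kmalloc(n * size, flags);
}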
 
 
#endif /* DDK_H */
/drivers/include/drm/drmP.hh
File deleted
/drivers/include/drm/drmP.h
36,15 → 36,22
#define _DRM_P_H_
 
#ifdef __KERNEL__
#ifdef __alpha__
/* add include of current.h so that "current" is defined
* before static inline funcs in wait.h. Doing this so we
* can build the DRM (part of PI DRI). 4/21/2000 S + B */
#include <asm/current.h>
#endif /* __alpha__ */
 
#include <syscall.h>
 
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/bug.h>
 
//#include <linux/miscdevice.h>
//#include <linux/fs.h>
72,25 → 79,15
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
 
struct module;
 
 
 
#include <drm_edid.h>
#include <drm_crtc.h>
 
 
struct drm_file;
struct drm_device;
 
//#include "drm_os_linux.h"
#include "drm_hashtab.h"
#include "drm_mm.h"
//#include <drm/drm_os_linux.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_mm.h>
 
#define DRM_UT_CORE 0x01
#define DRM_UT_DRIVER 0x02
#define DRM_UT_KMS 0x04
#define DRM_UT_MODE 0x08
 
#define KHZ2PICOS(a) (1000000000UL/(a))
 
/* get_scanout_position() return flags */
99,45 → 96,51
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
 
 
extern void drm_ut_debug_printk(unsigned int request_level,
 
#define DRM_UT_CORE 0x01
#define DRM_UT_DRIVER 0x02
#define DRM_UT_KMS 0x04
#define DRM_UT_PRIME 0x08
/*
* Three debug levels are defined: drm_core, drm_driver and drm_kms.
*
* The drm_core level is for the generic drm code (for example drm_ioctl,
* drm_mm and drm_memory) and uses the DRM_DEBUG(fmt, args...) macro.
* Its output is enabled with the boot option "drm.debug=1".
*
* The drm_driver level is for code specific to a drm driver (for example
* i915_drv, i915_dma, i915_gem and radeon_drv) and uses the
* DRM_DEBUG_DRIVER(fmt, args...) macro. Its output is enabled with the
* boot option "drm.debug=0x02".
*
* The drm_kms level is for the KMS code of a specific drm driver (for
* example the connector/crtc code) and uses the
* DRM_DEBUG_KMS(fmt, args...) macro. Its output is enabled with the
* boot option "drm.debug=0x04".
*
* The levels combine as a bitmask: "drm.debug=0x06" enables both
* DRM_DEBUG_KMS and DRM_DEBUG_DRIVER, while "drm.debug=0x05" enables
* DRM_DEBUG_KMS and DRM_DEBUG.
*/
 
extern __printf(4, 5)
void drm_ut_debug_printk(unsigned int request_level,
const char *prefix,
const char *function_name,
const char *format, ...);
extern __printf(2, 3)
int drm_err(const char *func, const char *format, ...);
 
#define DRM_DEBUG_MODE(prefix, fmt, args...) \
do { \
dbgprintf("drm debug: %s" fmt, \
__func__, ##args); \
} while (0)
 
#define DRM_DEBUG(fmt, args...) \
do { \
printk("[" DRM_NAME ":%s] " fmt , __func__ , ##args); \
} while(0)
 
#define DRM_DEBUG_KMS(fmt, args...) \
do { \
printk("[" DRM_NAME ":%s] " fmt , __func__ , ##args); \
} while(0)
 
#define DRM_DEBUG_DRIVER(fmt, args...) \
do { \
printk("[" DRM_NAME ":%s] " fmt , __func__ , ##args); \
} while (0)
 
#define DRM_LOG_KMS(fmt, args...) \
do { \
printk("[" DRM_NAME "]" fmt, ##args); \
} while (0)
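In this port the macros above all reduce to plain printk()/dbgprintf() output. A hypothetical call site for illustration:

/* Hypothetical call sites; the logged values are illustrative only. */
static void log_modeset(int pipe, const char *name, int status)
{
    DRM_DEBUG("mode set on crtc %d\n", pipe);
    DRM_DEBUG_KMS("connector %s: status %d\n", name, status);
}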
 
static inline int drm_sysfs_connector_add(struct drm_connector *connector)
{ return 0; };
 
static inline void drm_sysfs_connector_remove(struct drm_connector *connector)
{ };
 
#if 0
 
/***********************************************************************/
/** \name DRM template customization defaults */
/*@{*/
157,6 → 160,7
#define DRIVER_IRQ_VBL2 0x800
#define DRIVER_GEM 0x1000
#define DRIVER_MODESET 0x2000
#define DRIVER_PRIME 0x4000
 
#define DRIVER_BUS_PCI 0x1
#define DRIVER_BUS_PLATFORM 0x2
193,22 → 197,12
* \param fmt printf() like format string.
* \param arg arguments
*/
#define DRM_ERROR(fmt, arg...) \
printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)
#define DRM_ERROR(fmt, ...) \
drm_err(__func__, fmt, ##__VA_ARGS__)
 
/**
* Memory error output.
*
* \param area memory area where the error occurred.
* \param fmt printf() like format string.
* \param arg arguments
*/
#define DRM_MEM_ERROR(area, fmt, arg...) \
printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \
drm_mem_stats[area].name , ##arg)
#define DRM_INFO(fmt, ...) \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
 
#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)
 
/**
* Debug output.
*
216,45 → 210,43
* \param arg arguments
*/
#if DRM_DEBUG_CODE
#define DRM_DEBUG(fmt, args...) \
#define DRM_DEBUG(fmt, ...) \
do { \
drm_ut_debug_printk(DRM_UT_CORE, DRM_NAME, \
__func__, fmt, ##args); \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
 
#define DRM_DEBUG_DRIVER(fmt, args...) \
#define DRM_DEBUG_DRIVER(fmt, ...) \
do { \
drm_ut_debug_printk(DRM_UT_DRIVER, DRM_NAME, \
__func__, fmt, ##args); \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_DEBUG_KMS(fmt, args...) \
#define DRM_DEBUG_KMS(fmt, ...) \
do { \
drm_ut_debug_printk(DRM_UT_KMS, DRM_NAME, \
__func__, fmt, ##args); \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_LOG(fmt, args...) \
#define DRM_DEBUG_PRIME(fmt, ...) \
do { \
drm_ut_debug_printk(DRM_UT_CORE, NULL, \
NULL, fmt, ##args); \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_LOG_KMS(fmt, args...) \
#define DRM_LOG(fmt, ...) \
do { \
drm_ut_debug_printk(DRM_UT_KMS, NULL, \
NULL, fmt, ##args); \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_LOG_MODE(fmt, args...) \
#define DRM_LOG_KMS(fmt, ...) \
do { \
drm_ut_debug_printk(DRM_UT_MODE, NULL, \
NULL, fmt, ##args); \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_LOG_DRIVER(fmt, args...) \
#define DRM_LOG_MODE(fmt, ...) \
do { \
drm_ut_debug_printk(DRM_UT_DRIVER, NULL, \
NULL, fmt, ##args); \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#define DRM_LOG_DRIVER(fmt, ...) \
do { \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__); \
} while (0)
#else
#define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
#define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
#define DRM_DEBUG_PRIME(fmt, args...) do { } while (0)
#define DRM_DEBUG(fmt, arg...) do { } while (0)
#define DRM_LOG(fmt, arg...) do { } while (0)
#define DRM_LOG_KMS(fmt, args...) do { } while (0)
293,6 → 285,7
} \
} while (0)
 
#if 0
/**
* Ioctl function type.
*
357,7 → 350,6
struct drm_buf *next; /**< Kernel-only: used for free list */
__volatile__ int waiting; /**< On kernel DMA queue */
__volatile__ int pending; /**< On hardware DMA queue */
wait_queue_head_t dma_wait; /**< Processes waiting */
struct drm_file *file_priv; /**< Private of holding file descr */
int context; /**< Kernel queue for this buffer */
int while_locked; /**< Dispatch this buffer while locked */
429,11 → 421,17
void (*destroy)(struct drm_pending_event *event);
};
 
/* initial implementation using a linked list - todo hashtab */
struct drm_prime_file_private {
struct list_head head;
struct mutex lock;
};
 
/** File private data */
struct drm_file {
int authenticated;
pid_t pid;
uid_t uid;
struct pid *pid;
kuid_t uid;
drm_magic_t magic;
unsigned long ioctl_count;
struct list_head lhead;
456,6 → 454,8
wait_queue_head_t event_wait;
struct list_head event_list;
int event_space;
 
struct drm_prime_file_private prime;
};
 
/** Wait queue */
677,7 → 677,7
void *driver_private;
};
 
#include "drm_crtc.h"
#include <drm/drm_crtc.h>
 
/* per-master structure */
struct drm_master {
758,11 → 758,11
* @dev: DRM device
* @crtc: counter to fetch
*
* Driver callback for fetching a raw hardware vblank counter
* for @crtc. If a device doesn't have a hardware counter, the
* driver can simply return the value of drm_vblank_count and
* make the enable_vblank() and disable_vblank() hooks into no-ops,
* leaving interrupts enabled at all times.
* Driver callback for fetching a raw hardware vblank counter for @crtc.
* If a device doesn't have a hardware counter, the driver can simply
* return the value of drm_vblank_count. The DRM core will account for
* missed vblank events while interrupts were disabled based on system
* timestamps.
*
* Wraparound handling and loss of events due to modesetting is dealt
* with in the DRM core code.
879,12 → 879,6
void (*irq_preinstall) (struct drm_device *dev);
int (*irq_postinstall) (struct drm_device *dev);
void (*irq_uninstall) (struct drm_device *dev);
void (*reclaim_buffers) (struct drm_device *dev,
struct drm_file * file_priv);
void (*reclaim_buffers_locked) (struct drm_device *dev,
struct drm_file *file_priv);
void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
struct drm_file *file_priv);
void (*set_version) (struct drm_device *dev,
struct drm_set_version *sv);
 
915,6 → 909,20
int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
 
/* prime: */
/* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */
int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
uint32_t handle, uint32_t flags, int *prime_fd);
/* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */
int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
int prime_fd, uint32_t *handle);
/* export GEM -> dmabuf */
struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
/* import dmabuf -> GEM */
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
struct dma_buf *dma_buf);
 
/* vga arb irq handler */
void (*vgaarb_irq)(struct drm_device *dev, bool state);
 
930,7 → 938,7
uint32_t handle);
 
/* Driver private ops for this object */
struct vm_operations_struct *gem_vm_ops;
const struct vm_operations_struct *gem_vm_ops;
 
int major;
int minor;
1092,12 → 1100,8
 
/*@} */
 
/** \name DMA queues (contexts) */
/** \name DMA support */
/*@{ */
int queue_count; /**< Number of active DMA queues */
int queue_reserved; /**< Number of reserved DMA queues */
int queue_slots; /**< Actual length of queuelist */
// struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */
// struct drm_device_dma *dma; /**< Optional pointer for DMA support */
/*@} */
 
1182,6 → 1186,8
struct idr object_name_idr;
/*@} */
int switch_power_state;
 
atomic_t unplugged; /* device has been unplugged or gone away */
};
 
#define DRM_SWITCH_POWER_ON 0
1272,17 → 1278,12
/* Mapping support (drm_vm.h) */
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
extern void drm_vm_open_locked(struct vm_area_struct *vma);
extern void drm_vm_close_locked(struct vm_area_struct *vma);
extern void drm_vm_open_locked(struct drm_device *dev, struct vm_area_struct *vma);
extern void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
 
/* Memory management support (drm_memory.h) */
#include "drm_memory.h"
extern void drm_mem_init(void);
extern int drm_mem_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
 
#include <drm/drm_memory.h>
extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
1346,6 → 1347,8
 
/* Cache management (drm_cache.c) */
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(char *addr, unsigned long length);
 
/* Locking IOCTL support (drm_lock.h) */
extern int drm_lock(struct drm_device *dev, void *data,
1397,12 → 1400,8
/* IRQ support (drm_irq.h) */
extern int drm_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
extern int drm_irq_install(struct drm_device *dev);
extern int drm_irq_uninstall(struct drm_device *dev);
extern void drm_driver_irq_preinstall(struct drm_device *dev);
extern void drm_driver_irq_postinstall(struct drm_device *dev);
extern void drm_driver_irq_uninstall(struct drm_device *dev);
 
extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
1478,8 → 1477,12
 
extern void drm_put_dev(struct drm_device *dev);
extern int drm_put_minor(struct drm_minor **minor);
extern void drm_unplug_dev(struct drm_device *dev);
#endif
 
extern unsigned int drm_debug;
 
#if 0
extern unsigned int drm_vblank_offdelay;
extern unsigned int drm_timestamp_precision;
 
1510,7 → 1513,6
/* Info file support */
extern int drm_name_info(struct seq_file *m, void *data);
extern int drm_vm_info(struct seq_file *m, void *data);
extern int drm_queues_info(struct seq_file *m, void *data);
extern int drm_bufs_info(struct seq_file *m, void *data);
extern int drm_vblank_info(struct seq_file *m, void *data);
extern int drm_clients_info(struct seq_file *m, void* data);
1533,6 → 1535,7
struct drm_ati_pcigart_info * gart_info);
extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
struct drm_ati_pcigart_info * gart_info);
#endif
 
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align);
1539,6 → 1542,7
extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
 
#if 0
/* sysfs support (drm_sysfs.c) */
struct drm_sysfs_class;
extern struct class *drm_sysfs_create(struct module *owner, char *name);
1680,6 → 1684,7
}
 
 
 
static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
{
if (size * nmemb <= PAGE_SIZE)
1702,7 → 1707,12
 
#endif
 
#define DRM_PCIE_SPEED_25 1
#define DRM_PCIE_SPEED_50 2
#define DRM_PCIE_SPEED_80 4
 
extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
 
static __inline__ int drm_device_is_agp(struct drm_device *dev)
{
return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
1713,4 → 1723,8
return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
}
#endif /* __KERNEL__ */
 
#define drm_sysfs_connector_add(connector)
#define drm_sysfs_connector_remove(connector)
 
#endif
/drivers/include/drm/drm_crtc.h
30,6 → 30,7
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/fb.h>
#include <drm/drm_mode.h>
 
#include <drm/drm_fourcc.h>
 
36,6 → 37,7
struct drm_device;
struct drm_mode_set;
struct drm_framebuffer;
struct drm_object_properties;
 
 
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
50,8 → 52,16
struct drm_mode_object {
uint32_t id;
uint32_t type;
struct drm_object_properties *properties;
};
 
#define DRM_OBJECT_MAX_PROPERTY 24
struct drm_object_properties {
int count;
uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
uint64_t values[DRM_OBJECT_MAX_PROPERTY];
};
 
/*
* Note on terminology: here, for brevity and convenience, we refer to connector
* control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS,
109,7 → 119,8
.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \
.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \
.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
.vscan = (vs), .flags = (f), .vrefresh = 0
.vscan = (vs), .flags = (f), .vrefresh = 0, \
.base.type = DRM_MODE_OBJECT_MODE
 
#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */
 
121,7 → 132,7
char name[DRM_DISPLAY_MODE_LEN];
 
enum drm_mode_status status;
int type;
unsigned int type;
 
/* Proposed mode values */
int clock; /* in kHz */
157,8 → 168,6
int crtc_vsync_start;
int crtc_vsync_end;
int crtc_vtotal;
int crtc_hadjusted;
int crtc_vadjusted;
 
/* Driver private mode info */
int private_size;
207,11 → 216,10
u32 color_formats;
 
u8 cea_rev;
 
char *raw_edid; /* if any */
};
 
struct drm_framebuffer_funcs {
/* note: use drm_framebuffer_remove() */
void (*destroy)(struct drm_framebuffer *framebuffer);
int (*create_handle)(struct drm_framebuffer *fb,
struct drm_file *file_priv,
257,7 → 265,7
struct drm_mode_object base;
struct list_head head;
unsigned int length;
void *data;
unsigned char data[];
};
 
struct drm_property_enum {
285,19 → 293,16
 
/**
* drm_crtc_funcs - control CRTCs for a given device
* @save: save CRTC state
* @restore: restore CRTC state
* @reset: reset CRTC after state has been invalidate (e.g. resume)
* @dpms: control display power levels
* @save: save CRTC state
* @resore: restore CRTC state
* @lock: lock the CRTC
* @unlock: unlock the CRTC
* @shadow_allocate: allocate shadow pixmap
* @shadow_create: create shadow pixmap for rotation support
* @shadow_destroy: free shadow pixmap
* @mode_fixup: fixup proposed mode
* @mode_set: set the desired mode on the CRTC
* @cursor_set: setup the cursor
* @cursor_move: move the cursor
* @gamma_set: specify color ramp for CRTC
* @destroy: deinit and free object.
* @destroy: deinit and free object
* @set_property: called when a property is changed
* @set_config: apply a new CRTC configuration
* @page_flip: initiate a page flip
*
* The drm_crtc_funcs structure is the central CRTC management structure
* in the DRM. Each CRTC controls one or more connectors (note that the name
341,6 → 346,9
int (*page_flip)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
 
int (*set_property)(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val);
};
 
/**
351,6 → 359,9
* @enabled: is this CRTC enabled?
* @mode: current mode timings
* @hwmode: mode timings as programmed to hw regs
* @invert_dimensions: for purposes of error checking crtc vs fb sizes,
* invert the width/height of the crtc. This is used if the driver
* is performing 90 or 270 degree rotated scanout
* @x: x position on screen
* @y: y position on screen
* @funcs: CRTC control functions
360,6 → 371,7
* @framedur_ns: precise line timing
* @pixeldur_ns: precise pixel timing
* @helper_private: mid-layer private data
* @properties: property tracking for this CRTC
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
383,6 → 395,8
*/
struct drm_display_mode hwmode;
 
bool invert_dimensions;
 
int x, y;
const struct drm_crtc_funcs *funcs;
 
395,6 → 409,8
 
/* if you are using the helper */
void *helper_private;
 
struct drm_object_properties properties;
};
 
 
404,11 → 420,8
* @save: save connector state
* @restore: restore connector state
* @reset: reset connector after state has been invalidate (e.g. resume)
* @mode_valid: is this mode valid on the given connector?
* @mode_fixup: try to fixup proposed mode for this connector
* @mode_set: set this mode
* @detect: is this connector active?
* @get_modes: get mode list for this connector
* @fill_modes: fill mode list for this connector
* @set_property: property for this connector may need update
* @destroy: make object go away
* @force: notify the driver the connector is forced on
451,7 → 464,6
};
 
#define DRM_CONNECTOR_MAX_UMODES 16
#define DRM_CONNECTOR_MAX_PROPERTY 16
#define DRM_CONNECTOR_LEN 32
#define DRM_CONNECTOR_MAX_ENCODER 3
 
520,8 → 532,7
* @funcs: connector control functions
* @user_modes: user added mode list
* @edid_blob_ptr: DRM property containing EDID if present
* @property_ids: property tracking for this connector
* @property_values: value pointers or data for properties
* @properties: property tracking for this connector
* @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
* @dpms: current dpms state
* @helper_private: mid-layer private data
565,8 → 576,7
 
struct list_head user_modes;
struct drm_property_blob *edid_blob_ptr;
u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
struct drm_object_properties properties;
 
uint8_t polled; /* DRM_CONNECTOR_POLL_* */
 
588,6 → 598,7
int video_latency[2]; /* [0]: progressive, [1]: interlaced */
int audio_latency[2];
int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
unsigned bad_edid_counter;
};
 
/**
595,6 → 606,7
* @update_plane: update the plane configuration
* @disable_plane: shut down the plane
* @destroy: clean up plane resources
* @set_property: called when a property is changed
*/
struct drm_plane_funcs {
int (*update_plane)(struct drm_plane *plane,
605,6 → 617,9
uint32_t src_w, uint32_t src_h);
int (*disable_plane)(struct drm_plane *plane);
void (*destroy)(struct drm_plane *plane);
 
int (*set_property)(struct drm_plane *plane,
struct drm_property *property, uint64_t val);
};
 
/**
622,6 → 637,7
* @enabled: enabled flag
* @funcs: helper functions
* @helper_private: storage for driver layer
* @properties: property tracking for this plane
*/
struct drm_plane {
struct drm_device *dev;
644,6 → 660,8
 
const struct drm_plane_funcs *funcs;
void *helper_private;
 
struct drm_object_properties properties;
};
 
/**
663,8 → 681,6
* This is used to set modes.
*/
struct drm_mode_set {
struct list_head head;
 
struct drm_framebuffer *fb;
struct drm_crtc *crtc;
struct drm_display_mode *mode;
761,7 → 777,7
 
int min_width, min_height;
int max_width, max_height;
struct drm_mode_config_funcs *funcs;
const struct drm_mode_config_funcs *funcs;
resource_size_t fb_base;
 
/* output poll support */
796,6 → 812,9
struct drm_property *scaling_mode_property;
struct drm_property *dithering_mode_property;
struct drm_property *dirty_info_property;
 
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
};
 
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
807,20 → 826,26
#define obj_to_blob(x) container_of(x, struct drm_property_blob, base)
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
 
struct drm_prop_enum_list {
int type;
char *name;
};
 
extern void drm_crtc_init(struct drm_device *dev,
extern int drm_crtc_init(struct drm_device *dev,
struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs);
extern void drm_crtc_cleanup(struct drm_crtc *crtc);
 
extern void drm_connector_init(struct drm_device *dev,
extern int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type);
 
extern void drm_connector_cleanup(struct drm_connector *connector);
/* helper to unplug all connectors from sysfs for device */
extern void drm_connector_unplug_all(struct drm_device *dev);
 
extern void drm_encoder_init(struct drm_device *dev,
extern int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type);
843,11 → 868,13
extern char *drm_get_tv_select_name(int val);
extern void drm_fb_release(struct drm_file *file_priv);
extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
862,7 → 889,7
/* for us by fb module */
extern int drm_mode_attachmode_crtc(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_display_mode *mode);
const struct drm_display_mode *mode);
extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode);
 
extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
888,6 → 915,12
extern int drm_connector_property_get_value(struct drm_connector *connector,
struct drm_property *property,
uint64_t *value);
extern int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t val);
extern int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *value);
extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
extern void drm_framebuffer_set_object(struct drm_device *dev,
unsigned long handle);
894,6 → 927,9
extern int drm_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs);
extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
900,10 → 936,24
extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
extern bool drm_crtc_in_use(struct drm_crtc *crtc);
 
extern int drm_connector_attach_property(struct drm_connector *connector,
extern void drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val);
extern void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val);
extern struct drm_property *drm_property_create(struct drm_device *dev, int flags,
const char *name, int num_values);
extern struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
const char *name,
const struct drm_prop_enum_list *props,
int num_values);
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
int num_values);
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max);
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property);
extern int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name);
919,7 → 969,7
struct drm_encoder *encoder);
extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
extern int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type);
995,7 → 1045,28
int hdisplay, int vdisplay);
 
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
extern bool drm_edid_is_valid(struct edid *edid);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh);
int hsize, int vsize, int fresh,
bool rb);
 
extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
extern void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp);
extern int drm_format_num_planes(uint32_t format);
extern int drm_format_plane_cpp(uint32_t format, int plane);
extern int drm_format_horz_chroma_subsampling(uint32_t format);
extern int drm_format_vert_chroma_subsampling(uint32_t format);
 
#endif /* __DRM_CRTC_H__ */
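A hedged sketch of how a driver might use the object-property API declared above (drm_property_create_enum(), drm_object_attach_property()); the property name and enum values are illustrative, and error handling is omitted:

/* Illustrative only: create an enum property and attach it to a CRTC. */
static const struct drm_prop_enum_list scaling_names[] = {
    { 0, "none"   },
    { 1, "full"   },
    { 2, "aspect" },
};

static void attach_scaling_prop(struct drm_device *dev, struct drm_crtc *crtc)
{
    struct drm_property *prop;

    prop = drm_property_create_enum(dev, 0, "scaling mode",
                                    scaling_names,
                                    ARRAY_SIZE(scaling_names));
    if (!prop)
        return;

    /* properties now hang off any drm_mode_object, not just connectors */
    drm_object_attach_property(&crtc->base, prop, 0);
}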
/drivers/include/drm/drm_crtc_helper.h
44,6 → 44,13
ENTER_ATOMIC_MODE_SET,
};
 
/**
* drm_crtc_helper_funcs - helper operations for CRTCs
* @mode_fixup: try to fixup proposed mode for this CRTC
* @mode_set: set this mode
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_crtc_helper_funcs {
/*
* Control power levels on the CRTC. If the mode passed in is
55,7 → 62,7
 
/* Provider can fixup or change mode timings before modeset occurs */
bool (*mode_fixup)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
/* Actually set the mode */
int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
76,6 → 83,13
void (*disable)(struct drm_crtc *crtc);
};
 
/**
* drm_encoder_helper_funcs - helper operations for encoders
* @mode_fixup: try to fixup proposed mode for this encoder
* @mode_set: set this mode
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_encoder_helper_funcs {
void (*dpms)(struct drm_encoder *encoder, int mode);
void (*save)(struct drm_encoder *encoder);
82,7 → 96,7
void (*restore)(struct drm_encoder *encoder);
 
bool (*mode_fixup)(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*prepare)(struct drm_encoder *encoder);
void (*commit)(struct drm_encoder *encoder);
97,6 → 111,13
void (*disable)(struct drm_encoder *encoder);
};
 
/**
* drm_connector_helper_funcs - helper operations for connectors
* @get_modes: get mode list for this connector
* @mode_valid: is this mode valid on the given connector?
*
* The helper operations are called by the mid-layer CRTC helper.
*/
struct drm_connector_helper_funcs {
int (*get_modes)(struct drm_connector *connector);
int (*mode_valid)(struct drm_connector *connector,
145,6 → 166,4
extern void drm_kms_helper_poll_disable(struct drm_device *dev);
extern void drm_kms_helper_poll_enable(struct drm_device *dev);
 
extern int drm_format_num_planes(uint32_t format);
 
#endif
/drivers/include/drm/drm_dp_helper.h
26,7 → 26,19
#include <linux/types.h>
#include <linux/i2c.h>
 
/* From the VESA DisplayPort spec */
/*
* Unless otherwise noted, all values are from the DP 1.1a spec. Note that
* DP and DPCD versions are independent. Differences from 1.0 are not noted;
* 1.0 devices basically don't exist in the wild.
*
* Abbreviations, in chronological order:
*
* eDP: Embedded DisplayPort version 1
* DPI: DisplayPort Interoperability Guideline v1.1a
* 1.2: DisplayPort 1.2
*
* 1.2 formally includes both eDP and DPI definitions.
*/
 
#define AUX_NATIVE_WRITE 0x8
#define AUX_NATIVE_READ 0x9
53,7 → 65,7
 
#define DP_MAX_LANE_COUNT 0x002
# define DP_MAX_LANE_COUNT_MASK 0x1f
# define DP_TPS3_SUPPORTED (1 << 6)
# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */
# define DP_ENHANCED_FRAME_CAP (1 << 7)
 
#define DP_MAX_DOWNSPREAD 0x003
69,15 → 81,33
/* 10b = TMDS or HDMI */
/* 11b = Other */
# define DP_FORMAT_CONVERSION (1 << 3)
# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
 
#define DP_MAIN_LINK_CHANNEL_CODING 0x006
 
#define DP_EDP_CONFIGURATION_CAP 0x00d
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e
#define DP_DOWN_STREAM_PORT_COUNT 0x007
# define DP_PORT_COUNT_MASK 0x0f
# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */
# define DP_OUI_SUPPORT (1 << 7)
 
#define DP_PSR_SUPPORT 0x070
#define DP_I2C_SPEED_CAP 0x00c /* DPI */
# define DP_I2C_SPEED_1K 0x01
# define DP_I2C_SPEED_5K 0x02
# define DP_I2C_SPEED_10K 0x04
# define DP_I2C_SPEED_100K 0x08
# define DP_I2C_SPEED_400K 0x10
# define DP_I2C_SPEED_1M 0x20
 
#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
 
/* Multiple stream transport */
#define DP_MSTM_CAP 0x021 /* 1.2 */
# define DP_MST_CAP (1 << 0)
 
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
#define DP_PSR_CAPS 0x071
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
# define DP_PSR_SETUP_TIME_330 (0 << 1)
# define DP_PSR_SETUP_TIME_275 (1 << 1)
89,11 → 119,36
# define DP_PSR_SETUP_TIME_MASK (7 << 1)
# define DP_PSR_SETUP_TIME_SHIFT 1
 
/*
* 0x80-0x8f describe downstream port capabilities, but there are two layouts
* based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not,
* each port's descriptor is one byte wide. If it was set, each port's is
* four bytes wide, starting with the one byte from the base info. As of
* DP interop v1.1a only VGA defines additional detail.
*/
 
/* offset 0 */
#define DP_DOWNSTREAM_PORT_0 0x80
# define DP_DS_PORT_TYPE_MASK (7 << 0)
# define DP_DS_PORT_TYPE_DP 0
# define DP_DS_PORT_TYPE_VGA 1
# define DP_DS_PORT_TYPE_DVI 2
# define DP_DS_PORT_TYPE_HDMI 3
# define DP_DS_PORT_TYPE_NON_EDID 4
# define DP_DS_PORT_HPD (1 << 3)
/* offset 1 for VGA is maximum megapixels per second / 8 */
/* offset 2 */
# define DP_DS_VGA_MAX_BPC_MASK (3 << 0)
# define DP_DS_VGA_8BPC 0
# define DP_DS_VGA_10BPC 1
# define DP_DS_VGA_12BPC 2
# define DP_DS_VGA_16BPC 3
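A sketch of walking these descriptors under the one-byte/four-byte layout rule described above, assuming DP_DETAILED_CAP_INFO_AVAILABLE is read from the DOWNSTREAMPORT_PRESENT register at DPCD offset 0x005 (per DP 1.1a) and that dpcd[] holds bytes read from offset 0 onward:

/* Sketch: descriptor stride at 0x80-0x8f depends on the detailed-info bit. */
static int ds_port_type(const unsigned char *dpcd, int port)
{
    int stride = (dpcd[0x005] & DP_DETAILED_CAP_INFO_AVAILABLE) ? 4 : 1;
    unsigned char desc = dpcd[0x80 + port * stride];

    return desc & DP_DS_PORT_TYPE_MASK;     /* one of DP_DS_PORT_TYPE_* */
}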
 
/* link configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
 
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
103,7 → 158,7
# define DP_TRAINING_PATTERN_DISABLE 0
# define DP_TRAINING_PATTERN_1 1
# define DP_TRAINING_PATTERN_2 2
# define DP_TRAINING_PATTERN_3 3
# define DP_TRAINING_PATTERN_3 3 /* 1.2 */
# define DP_TRAINING_PATTERN_MASK 0x3
 
# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
144,16 → 199,32
 
#define DP_DOWNSPREAD_CTRL 0x107
# define DP_SPREAD_AMP_0_5 (1 << 4)
# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
 
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
# define DP_SET_ANSI_8B10B (1 << 0)
 
#define DP_PSR_EN_CFG 0x170
#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */
/* bitmask as for DP_I2C_SPEED_CAP */
 
#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? */
 
#define DP_MSTM_CTRL 0x111 /* 1.2 */
# define DP_MST_EN (1 << 0)
# define DP_UP_REQ_EN (1 << 1)
# define DP_UPSTREAM_IS_SRC (1 << 2)
 
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE (1 << 0)
# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1)
# define DP_PSR_CRC_VERIFICATION (1 << 2)
# define DP_PSR_FRAME_CAPTURE (1 << 3)
 
#define DP_SINK_COUNT 0x200
/* prior to 1.2 bit 7 was reserved mbz */
# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
# define DP_SINK_CP_READY (1 << 6)
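DPCD 1.2 moved the sink-count MSB into bit 7 (bit 6 now carries CP_READY), and DP_GET_SINK_COUNT() folds it back in. A worked example:

/* Raw SINK_COUNT byte 0xC2: bit 7 (count MSB) and bit 6 (CP_READY) set,
 * low bits 0x02. The macro rebuilds the 7-bit count:
 * (0x80 >> 1) | 0x02 == 0x42 == 66 sinks. */
unsigned int count = DP_GET_SINK_COUNT(0xC2);   /* == 0x42 */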
 
#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201
# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0)
# define DP_AUTOMATED_TEST_REQUEST (1 << 1)
160,8 → 231,6
# define DP_CP_IRQ (1 << 2)
# define DP_SINK_SPECIFIC_IRQ (1 << 6)
 
#define DP_EDP_CONFIGURATION_SET 0x10a
 
#define DP_LANE0_1_STATUS 0x202
#define DP_LANE2_3_STATUS 0x203
# define DP_LANE_CR_DONE (1 << 0)
213,18 → 282,22
# define DP_TEST_NAK (1 << 1)
# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2)
 
#define DP_SOURCE_OUI 0x300
#define DP_SINK_OUI 0x400
#define DP_BRANCH_OUI 0x500
 
#define DP_SET_POWER 0x600
# define DP_SET_POWER_D0 0x1
# define DP_SET_POWER_D3 0x2
 
#define DP_PSR_ERROR_STATUS 0x2006
#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */
# define DP_PSR_LINK_CRC_ERROR (1 << 0)
# define DP_PSR_RFB_STORAGE_ERROR (1 << 1)
 
#define DP_PSR_ESI 0x2007
#define DP_PSR_ESI 0x2007 /* XXX 1.2? */
# define DP_PSR_CAPS_CHANGE (1 << 0)
 
#define DP_PSR_STATUS 0x2008
#define DP_PSR_STATUS 0x2008 /* XXX 1.2? */
# define DP_PSR_SINK_INACTIVE 0
# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1
# define DP_PSR_SINK_ACTIVE_RFB 2
/drivers/include/drm/drm_edid.h
90,12 → 90,26
u8 min_hfreq_khz;
u8 max_hfreq_khz;
u8 pixel_clock_mhz; /* need to multiply by 10 */
__le16 sec_gtf_toggle; /* A000=use above, 20=use below */
u8 flags;
union {
struct {
u8 reserved;
u8 hfreq_start_khz; /* need to multiply by 2 */
u8 c; /* need to divide by 2 */
__le16 m;
u8 k;
u8 j; /* need to divide by 2 */
} __attribute__((packed)) gtf2;
struct {
u8 version;
u8 data1; /* high 6 bits: extra clock resolution */
u8 data2; /* plus low 2 of above: max hactive */
u8 supported_aspects;
u8 flags; /* preferred aspect and blanking support */
u8 supported_scalings;
u8 preferred_refresh;
} __attribute__((packed)) cvt;
} formula;
} __attribute__((packed));
 
struct detailed_data_wpindex {
238,5 → 252,6
struct drm_display_mode *mode);
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
int drm_load_edid_firmware(struct drm_connector *connector);
 
#endif /* __DRM_EDID_H__ */
/drivers/include/drm/drm_fb_helper.h
34,7 → 34,6
 
 
struct drm_fb_helper_crtc {
uint32_t crtc_id;
struct drm_mode_set mode_set;
struct drm_display_mode *desired_mode;
};
73,7 → 72,6
int connector_count;
struct drm_fb_helper_connector **connector_info;
struct drm_fb_helper_funcs *funcs;
int conn_limit;
struct fb_info *fbdev;
u32 pseudo_palette[17];
struct list_head kernel_fb_list;
/drivers/include/drm/drm_fixed.h
37,6 → 37,7
#define dfixed_init(A) { .full = dfixed_const((A)) }
#define dfixed_init_half(A) { .full = dfixed_const_half((A)) }
#define dfixed_trunc(A) ((A).full >> 12)
#define dfixed_frac(A) ((A).full & ((1 << 12) - 1))
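dfixed_frac() complements dfixed_trunc(): the low 12 bits of the 20.12 fixed-point value hold the fraction. A worked example, assuming fixed20_12 exposes the .full member already used by dfixed_init above:

/* 20.12 fixed point: value = full / 4096.
 * 2.75 -> full = 2 * 4096 + 3072 = 11264 */
fixed20_12 v = { .full = 11264 };

unsigned int ipart = dfixed_trunc(v);   /* 11264 >> 12    == 2    */
unsigned int fpart = dfixed_frac(v);    /* 11264 & 0xFFF  == 3072 (0.75) */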
 
static inline u32 dfixed_floor(fixed20_12 A)
{
/drivers/include/drm/drm_fourcc.h
106,9 → 106,10
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */
 
/* 2 non contiguous plane YCbCr */
#define DRM_FORMAT_NV12M fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */
/* special NV12 tiled format */
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */
 
/*
131,7 → 132,4
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
 
/* 3 non contiguous plane YCbCr */
#define DRM_FORMAT_YUV420M fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
 
#endif /* DRM_FOURCC_H */
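The format codes above are packed by fourcc_code(), which by its upstream definition (not shown in this hunk) stores the four characters little-endian in a u32. A worked example under that assumption:

/* Assumed: fourcc_code(a,b,c,d) ==
 *   (u32)(a) | ((u32)(b) << 8) | ((u32)(c) << 16) | ((u32)(d) << 24)
 * so DRM_FORMAT_NV12MT == 'T' | 'M'<<8 | '1'<<16 | '2'<<24 == 0x32314D54 */
unsigned int nv12mt = 'T' | ('M' << 8) | ('1' << 16) | ('2' << 24);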
/drivers/include/drm/drm_mm.h
50,6 → 50,7
unsigned scanned_next_free : 1;
unsigned scanned_preceeds_hole : 1;
unsigned allocated : 1;
unsigned long color;
unsigned long start;
unsigned long size;
struct drm_mm *mm;
66,6 → 67,7
spinlock_t unused_lock;
unsigned int scan_check_range : 1;
unsigned scan_alignment;
unsigned long scan_color;
unsigned long scan_size;
unsigned long scan_hit_start;
unsigned scan_hit_size;
73,6 → 75,9
unsigned long scan_start;
unsigned long scan_end;
struct drm_mm_node *prev_scanned_node;
 
void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
unsigned long *start, unsigned long *end);
};
 
static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
100,11 → 105,13
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
unsigned long color,
int atomic);
extern struct drm_mm_node *drm_mm_get_block_range_generic(
struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
int atomic);
112,13 → 119,13
unsigned long size,
unsigned alignment)
{
return drm_mm_get_block_generic(parent, size, alignment, 0);
return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
{
return drm_mm_get_block_generic(parent, size, alignment, 1);
return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
}
static inline struct drm_mm_node *drm_mm_get_block_range(
struct drm_mm_node *parent,
127,9 → 134,20
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment,
return drm_mm_get_block_range_generic(parent, size, alignment, 0,
start, end, 0);
}
static inline struct drm_mm_node *drm_mm_get_color_block_range(
struct drm_mm_node *parent,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment, color,
start, end, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
struct drm_mm_node *parent,
unsigned long size,
137,7 → 155,7
unsigned long start,
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment,
return drm_mm_get_block_range_generic(parent, size, alignment, 0,
start, end, 1);
}
extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
149,18 → 167,59
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern void drm_mm_remove_node(struct drm_mm_node *node);
extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
int best_match);
extern struct drm_mm_node *drm_mm_search_free_in_range(
unsigned long color,
bool best_match);
extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
bool best_match);
static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
bool best_match)
{
return drm_mm_search_free_generic(mm, size, alignment, 0, best_match);
}
static inline struct drm_mm_node *drm_mm_search_free_in_range(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long start,
unsigned long end,
bool best_match)
{
return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
start, end, best_match);
}
static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
bool best_match)
{
return drm_mm_search_free_generic(mm, size, alignment, color, best_match);
}
static inline struct drm_mm_node *drm_mm_search_free_in_range_color(
const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
bool best_match)
{
return drm_mm_search_free_in_range_generic(mm, size, alignment, color,
start, end, best_match);
}
extern int drm_mm_init(struct drm_mm *mm,
unsigned long start,
unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
171,10 → 230,14
return block->mm;
}
 
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
unsigned alignment);
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
void drm_mm_init_scan(struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end);
int drm_mm_scan_add_block(struct drm_mm_node *node);
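The new color parameter threads an opaque tag through allocation and scanning so that mm->color_adjust() can pad holes between incompatible neighbors. A hedged usage sketch of the generic entry points; CACHE_LLC is an illustrative value, not a real constant:

#define CACHE_LLC 1     /* illustrative color tag */

static struct drm_mm_node *alloc_llc_block(struct drm_mm *mm)
{
    struct drm_mm_node *node;

    /* find a hole; the color is passed so color_adjust() can shrink
     * the hole's usable start/end around differently-colored nodes */
    node = drm_mm_search_free_color(mm, 4096, 0, CACHE_LLC, false);
    if (!node)
        return NULL;

    return drm_mm_get_block_generic(node, 4096, 0, CACHE_LLC, 0);
}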
/drivers/include/drm/drm_mode.h
27,6 → 27,8
#ifndef _DRM_MODE_H
#define _DRM_MODE_H
 
#include <linux/types.h>
 
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
228,6 → 230,7
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
 
struct drm_mode_property_enum {
__u64 value;
252,6 → 255,21
__u32 connector_id;
};
 
struct drm_mode_obj_get_properties {
__u64 props_ptr;
__u64 prop_values_ptr;
__u32 count_props;
__u32 obj_id;
__u32 obj_type;
};
 
struct drm_mode_obj_set_property {
__u64 value;
__u32 prop_id;
__u32 obj_id;
__u32 obj_type;
};
 
struct drm_mode_get_blob {
__u32 blob_id;
__u32 length;
341,8 → 359,9
struct drm_mode_modeinfo mode;
};
 
#define DRM_MODE_CURSOR_BO (1<<0)
#define DRM_MODE_CURSOR_MOVE (1<<1)
#define DRM_MODE_CURSOR_BO 0x01
#define DRM_MODE_CURSOR_MOVE 0x02
#define DRM_MODE_CURSOR_FLAGS 0x03
 
/*
* depending on the value in flags different members are used.
/drivers/include/drm/drm_pciids.h
1,7 → 1,3
/*
This file is auto-generated from the drm_pciids.txt in the DRM CVS
Please contact dri-devel@lists.sf.net to add new cards to this list
*/
#define radeon_PCI_IDS \
{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
181,6 → 177,7
{0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
198,9 → 195,64
{0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x684C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
242,6 → 294,7
{0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
483,7 → 536,10
{0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
498,6 → 554,33
{0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9904, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9905, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9906, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
 
 
/drivers/include/drm/i915_drm.h
0,0 → 1,953
/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
 
#ifndef _UAPI_I915_DRM_H_
#define _UAPI_I915_DRM_H_
 
#include <drm/drm.h>
 
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
 
/* For use by IPS driver */
extern unsigned long i915_read_mch_val(void);
extern bool i915_gpu_raise(void);
extern bool i915_gpu_lower(void);
extern bool i915_gpu_busy(void);
extern bool i915_gpu_turbo_disable(void);
 
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
* of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14
 
typedef struct _drm_i915_init {
enum {
I915_INIT_DMA = 0x01,
I915_CLEANUP_DMA = 0x02,
I915_RESUME_DMA = 0x03
} func;
unsigned int mmio_offset;
int sarea_priv_offset;
unsigned int ring_start;
unsigned int ring_end;
unsigned int ring_size;
unsigned int front_offset;
unsigned int back_offset;
unsigned int depth_offset;
unsigned int w;
unsigned int h;
unsigned int pitch;
unsigned int pitch_bits;
unsigned int back_pitch;
unsigned int depth_pitch;
unsigned int cpp;
unsigned int chipset;
} drm_i915_init_t;
 
typedef struct _drm_i915_sarea {
struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
int last_upload; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int ctxOwner; /* last context to upload state */
int texAge;
int pf_enabled; /* is pageflipping allowed? */
int pf_active;
int pf_current_page; /* which buffer is being displayed? */
int perf_boxes; /* performance boxes to be displayed */
int width, height; /* screen size in pixels */
 
drm_handle_t front_handle;
int front_offset;
int front_size;
 
drm_handle_t back_handle;
int back_offset;
int back_size;
 
drm_handle_t depth_handle;
int depth_offset;
int depth_size;
 
drm_handle_t tex_handle;
int tex_offset;
int tex_size;
int log_tex_granularity;
int pitch;
int rotation; /* 0, 90, 180 or 270 */
int rotated_offset;
int rotated_size;
int rotated_pitch;
int virtualX, virtualY;
 
unsigned int front_tiled;
unsigned int back_tiled;
unsigned int depth_tiled;
unsigned int rotated_tiled;
unsigned int rotated2_tiled;
 
int pipeA_x;
int pipeA_y;
int pipeA_w;
int pipeA_h;
int pipeB_x;
int pipeB_y;
int pipeB_w;
int pipeB_h;
 
/* fill out some space for old userspace triple buffer */
drm_handle_t unused_handle;
__u32 unused1, unused2, unused3;
 
/* buffer object handles for static buffers. May change
* over the lifetime of the client.
*/
__u32 front_bo_handle;
__u32 back_bo_handle;
__u32 unused_bo_handle;
__u32 depth_bo_handle;
 
} drm_i915_sarea_t;
 
/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h
 
/* Flags for perf_boxes
*/
#define I915_BOX_RING_EMPTY 0x1
#define I915_BOX_FLIP 0x2
#define I915_BOX_WAIT 0x4
#define I915_BOX_TEXTURE_LOAD 0x8
#define I915_BOX_LOST_CONTEXT 0x10
 
/* I915 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_I915_INIT 0x00
#define DRM_I915_FLUSH 0x01
#define DRM_I915_FLIP 0x02
#define DRM_I915_BATCHBUFFER 0x03
#define DRM_I915_IRQ_EMIT 0x04
#define DRM_I915_IRQ_WAIT 0x05
#define DRM_I915_GETPARAM 0x06
#define DRM_I915_SETPARAM 0x07
#define DRM_I915_ALLOC 0x08
#define DRM_I915_FREE 0x09
#define DRM_I915_INIT_HEAP 0x0a
#define DRM_I915_CMDBUFFER 0x0b
#define DRM_I915_DESTROY_HEAP 0x0c
#define DRM_I915_SET_VBLANK_PIPE 0x0d
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_GEM_INIT 0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN 0x15
#define DRM_I915_GEM_UNPIN 0x16
#define DRM_I915_GEM_BUSY 0x17
#define DRM_I915_GEM_THROTTLE 0x18
#define DRM_I915_GEM_ENTERVT 0x19
#define DRM_I915_GEM_LEAVEVT 0x1a
#define DRM_I915_GEM_CREATE 0x1b
#define DRM_I915_GEM_PREAD 0x1c
#define DRM_I915_GEM_PWRITE 0x1d
#define DRM_I915_GEM_MMAP 0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_I915_GEM_SW_FINISH 0x20
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
#define DRM_I915_GEM_MADVISE 0x26
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_I915_GEM_EXECBUFFER2 0x29
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
#define DRM_I915_GEM_WAIT 0x2c
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
#define DRM_I915_GEM_SET_CACHING 0x2f
#define DRM_I915_GEM_GET_CACHING 0x30
#define DRM_I915_REG_READ 0x31
 
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
 
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
*/
typedef struct drm_i915_batchbuffer {
int start; /* agp offset */
int used; /* nr bytes in use */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
 
/* As above, but pass a pointer to userspace buffer which can be
* validated by the kernel prior to sending to hardware.
*/
typedef struct _drm_i915_cmdbuffer {
char __user *buf; /* pointer to userspace command buffer */
int sz; /* nr bytes in buf */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;
 
/* Userspace can request & wait on irq's:
*/
typedef struct drm_i915_irq_emit {
int __user *irq_seq;
} drm_i915_irq_emit_t;
 
typedef struct drm_i915_irq_wait {
int irq_seq;
} drm_i915_irq_wait_t;
 
/* Ioctl to query kernel params:
*/
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_HAS_PAGEFLIPPING 8
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
#define I915_PARAM_HAS_BLT 11
#define I915_PARAM_HAS_RELAXED_FENCING 12
#define I915_PARAM_HAS_COHERENT_RINGS 13
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
#define I915_PARAM_HAS_WAIT_TIMEOUT 19
#define I915_PARAM_HAS_SEMAPHORES 20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
 
typedef struct drm_i915_getparam {
int param;
int __user *value;
} drm_i915_getparam_t;
 
/* Ioctl to set kernel params:
*/
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
#define I915_SETPARAM_NUM_USED_FENCES 4
 
typedef struct drm_i915_setparam {
int param;
int value;
} drm_i915_setparam_t;
 
/* A memory manager for regions of shared memory:
*/
#define I915_MEM_REGION_AGP 1
 
typedef struct drm_i915_mem_alloc {
int region;
int alignment;
int size;
int __user *region_offset; /* offset from start of fb or agp */
} drm_i915_mem_alloc_t;
 
typedef struct drm_i915_mem_free {
int region;
int region_offset;
} drm_i915_mem_free_t;
 
typedef struct drm_i915_mem_init_heap {
int region;
int size;
int start;
} drm_i915_mem_init_heap_t;
 
/* Allow memory manager to be torn down and re-initialized (eg on
* rotate):
*/
typedef struct drm_i915_mem_destroy_heap {
int region;
} drm_i915_mem_destroy_heap_t;
 
/* Allow X server to configure which pipes to monitor for vblank signals
*/
#define DRM_I915_VBLANK_PIPE_A 1
#define DRM_I915_VBLANK_PIPE_B 2
 
typedef struct drm_i915_vblank_pipe {
int pipe;
} drm_i915_vblank_pipe_t;
 
/* Schedule buffer swap at given vertical blank:
*/
typedef struct drm_i915_vblank_swap {
drm_drawable_t drawable;
enum drm_vblank_seq_type seqtype;
unsigned int sequence;
} drm_i915_vblank_swap_t;
 
typedef struct drm_i915_hws_addr {
__u64 addr;
} drm_i915_hws_addr_t;
 
struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the DRM memory
* manager.
*/
__u64 gtt_start;
/**
* Ending offset in the GTT to be managed by the DRM memory
* manager.
*/
__u64 gtt_end;
};
 
struct drm_i915_gem_create {
/**
* Requested size for the object.
*
* The (page-aligned) allocated size for the object will be returned.
*/
__u64 size;
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
__u32 pad;
};
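
A minimal usage sketch (assuming libdrm's drmIoctl() and an open DRM fd; names are illustrative and error handling is elided):

struct drm_i915_gem_create create = { .size = 4096 };
if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0) {
	/* create.size holds the page-aligned size, create.handle the new object */
}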
 
struct drm_i915_gem_pread {
/** Handle for the object being read. */
__u32 handle;
__u32 pad;
/** Offset into the object to read from */
__u64 offset;
/** Length of data to read */
__u64 size;
/**
* Pointer to write the data into.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 data_ptr;
};
 
struct drm_i915_gem_pwrite {
/** Handle for the object being written to. */
__u32 handle;
__u32 pad;
/** Offset into the object to write to */
__u64 offset;
/** Length of data to write */
__u64 size;
/**
* Pointer to read the data from.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 data_ptr;
};
 
struct drm_i915_gem_mmap {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/** Offset in the object to map. */
__u64 offset;
/**
* Length of data to map.
*
* The value will be page-aligned.
*/
__u64 size;
/**
* Returned pointer the data was mapped at.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 addr_ptr;
};
 
struct drm_i915_gem_mmap_gtt {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
};
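
The fake offset is meant to be passed to a subsequent mmap() on the DRM fd; a hedged sketch (fd, handle and size assumed):

struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
/* map the object through the GTT aperture at the returned fake offset */
void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, arg.offset);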
 
struct drm_i915_gem_set_domain {
/** Handle for the object */
__u32 handle;
 
/** New read domains */
__u32 read_domains;
 
/** New write domain */
__u32 write_domain;
};
 
struct drm_i915_gem_sw_finish {
/** Handle for the object */
__u32 handle;
};
 
struct drm_i915_gem_relocation_entry {
/**
* Handle of the buffer being pointed to by this relocation entry.
*
* It's appealing to make this an index into the mm_validate_entry
* list to refer to the buffer, but this allows the driver to create
* a relocation list for state buffers and not re-write it per
* exec using the buffer.
*/
__u32 target_handle;
 
/**
* Value to be added to the offset of the target buffer to make up
* the relocation entry.
*/
__u32 delta;
 
/** Offset in the buffer the relocation entry will be written into */
__u64 offset;
 
/**
* Offset value of the target buffer that the relocation entry was last
* written as.
*
* If the buffer has the same offset as last time, we can skip syncing
* and writing the relocation. This value is written back out by
* the execbuffer ioctl when the relocation is written.
*/
__u64 presumed_offset;
 
/**
* Target memory domains read by this operation.
*/
__u32 read_domains;
 
/**
* Target memory domains written by this operation.
*
* Note that only one domain may be written by the whole
* execbuffer operation, so that where there are conflicts,
* the application will get -EINVAL back.
*/
__u32 write_domain;
};
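
A sketch of filling one relocation entry (target and last_known_offset are assumed values); if presumed_offset still matches the target's real placement, the kernel can skip rewriting the batch:

struct drm_i915_gem_relocation_entry reloc = {
	.target_handle   = target,            /* buffer being pointed to */
	.delta           = 0,
	.offset          = 128,               /* byte in the batch to patch */
	.presumed_offset = last_known_offset, /* lets the kernel skip the write */
	.read_domains    = I915_GEM_DOMAIN_RENDER,
	.write_domain    = 0,
};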
 
/** @{
* Intel memory domains
*
* Most of these just align with the various caches in
* the system and are used to flush and invalidate as
* objects end up cached in different domains.
*/
/** CPU cache */
#define I915_GEM_DOMAIN_CPU 0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER 0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER 0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND 0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX 0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT 0x00000040
/** @} */
 
struct drm_i915_gem_exec_object {
/**
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
__u32 handle;
 
/** Number of relocations to be performed on this buffer */
__u32 relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
__u64 relocs_ptr;
 
/** Required alignment in graphics aperture */
__u64 alignment;
 
/**
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
__u64 offset;
};
 
struct drm_i915_gem_execbuffer {
/**
* List of buffers to be validated with their relocations to be
* performed on them.
*
* This is a pointer to an array of struct drm_i915_gem_validate_entry.
*
* These buffers must be listed in an order such that all relocations
* a buffer is performing refer to buffers that have already appeared
* in the validate list.
*/
__u64 buffers_ptr;
__u32 buffer_count;
 
/** Offset in the batchbuffer to start execution from. */
__u32 batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
__u32 batch_len;
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr;
};
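
A hedged ordering example: the relocation target must precede the buffer that relocates against it (handles, reloc and batch_bytes assumed):

struct drm_i915_gem_exec_object objs[2] = {
	{ .handle = target_handle },                     /* appears first */
	{ .handle = batch_handle, .relocation_count = 1,
	  .relocs_ptr = (__u64)(unsigned long)&reloc },  /* relocates against it */
};
struct drm_i915_gem_execbuffer eb = {
	.buffers_ptr  = (__u64)(unsigned long)objs,
	.buffer_count = 2,
	.batch_len    = batch_bytes,
};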
 
struct drm_i915_gem_exec_object2 {
/**
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
__u32 handle;
 
/** Number of relocations to be performed on this buffer */
__u32 relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
__u64 relocs_ptr;
 
/** Required alignment in graphics aperture */
__u64 alignment;
 
/**
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
__u64 offset;
 
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
__u64 flags;
__u64 rsvd1;
__u64 rsvd2;
};
 
struct drm_i915_gem_execbuffer2 {
/**
* List of gem_exec_object2 structs
*/
__u64 buffers_ptr;
__u32 buffer_count;
 
/** Offset in the batchbuffer to start execution from. */
__u32 batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
__u32 batch_len;
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr;
#define I915_EXEC_RING_MASK (7<<0)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
#define I915_EXEC_BLT (3<<0)
 
/* Used for switching the constants addressing mode on gen4+ RENDER ring.
* Gen6+ only supports relative addressing to dynamic state (default) and
* absolute addressing.
*
* These flags are ignored for the BSD and BLT rings.
*/
#define I915_EXEC_CONSTANTS_MASK (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
__u64 flags;
__u64 rsvd1; /* now used for context info */
__u64 rsvd2;
};
 
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
 
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
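
A sketch tying the pieces together (fd assumed; the context id travels in rsvd1 via the macros above):

struct drm_i915_gem_context_create ctx = { 0 };
drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &ctx);

struct drm_i915_gem_execbuffer2 eb2 = { .flags = I915_EXEC_RENDER };
i915_execbuffer2_set_context_id(eb2, ctx.ctx_id);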
 
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
__u32 handle;
__u32 pad;
 
/** alignment required within the aperture */
__u64 alignment;
 
/** Returned GTT offset of the buffer. */
__u64 offset;
};
 
struct drm_i915_gem_unpin {
/** Handle of the buffer to be unpinned. */
__u32 handle;
__u32 pad;
};
 
struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
__u32 handle;
 
/** Return busy status (1 if busy, 0 if idle).
* The high word is used to indicate on which rings the object
* currently resides:
* 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
*/
__u32 busy;
};
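
A decoding sketch for the returned word (fd and handle assumed):

struct drm_i915_gem_busy busy = { .handle = handle };
drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
int is_busy   = busy.busy & 1;          /* low word: busy flag */
int on_render = (busy.busy >> 16) & 1;  /* bit 16: render ring */
int on_bsd    = (busy.busy >> 17) & 1;  /* bit 17: bsd ring */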
 
#define I915_CACHING_NONE 0
#define I915_CACHING_CACHED 1
 
struct drm_i915_gem_caching {
/**
* Handle of the buffer to set/get the caching level of. */
__u32 handle;
 
/**
* Caching level to apply or return value.
*
* bits0-15 are for generic caching control (i.e. the above defined
* values). bits16-31 are reserved for platform-specific variations
* (e.g. l3$ caching on gen7). */
__u32 caching;
};
 
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
 
#define I915_BIT_6_SWIZZLE_NONE 0
#define I915_BIT_6_SWIZZLE_9 1
#define I915_BIT_6_SWIZZLE_9_10 2
#define I915_BIT_6_SWIZZLE_9_11 3
#define I915_BIT_6_SWIZZLE_9_10_11 4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN 5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17 6
#define I915_BIT_6_SWIZZLE_9_10_17 7
 
struct drm_i915_gem_set_tiling {
/** Handle of the buffer to have its tiling state updated */
__u32 handle;
 
/**
* Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*
* This value is to be set on request, and will be updated by the
* kernel on successful return with the actual chosen tiling layout.
*
* The tiling mode may be demoted to I915_TILING_NONE when the system
* has bit 6 swizzling that can't be managed correctly by GEM.
*
* Buffer contents become undefined when changing tiling_mode.
*/
__u32 tiling_mode;
 
/**
* Stride in bytes for the object when in I915_TILING_X or
* I915_TILING_Y.
*/
__u32 stride;
 
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
__u32 swizzle_mode;
};
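
Because the kernel may demote the request, the returned tiling_mode should be checked; a sketch (fd and handle assumed):

struct drm_i915_gem_set_tiling tile = {
	.handle      = handle,
	.tiling_mode = I915_TILING_X,
	.stride      = 512,
};
drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &tile);
if (tile.tiling_mode == I915_TILING_NONE) {
	/* bit 6 swizzling prevented tiling; fall back to a linear layout */
}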
 
struct drm_i915_gem_get_tiling {
/** Handle of the buffer to get tiling state for. */
__u32 handle;
 
/**
* Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*/
__u32 tiling_mode;
 
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
__u32 swizzle_mode;
};
 
struct drm_i915_gem_get_aperture {
/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
__u64 aper_size;
 
/**
* Available space in the aperture used by i915_gem_execbuffer, in
* bytes
*/
__u64 aper_available_size;
};
 
struct drm_i915_get_pipe_from_crtc_id {
/** ID of CRTC being requested **/
__u32 crtc_id;
 
/** pipe of requested CRTC **/
__u32 pipe;
};
 
#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */
 
struct drm_i915_gem_madvise {
/** Handle of the buffer to change the backing store advice */
__u32 handle;
 
/* Advice: either the buffer will be needed again in the near future,
* or won't be and could be discarded under memory pressure.
*/
__u32 madv;
 
/** Whether the backing store still exists. */
__u32 retained;
};
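
A purgeable-buffer sketch (fd and handle assumed): mark the object discardable while cached, and check retained before reusing it:

struct drm_i915_gem_madvise madv = { .handle = handle, .madv = I915_MADV_DONTNEED };
drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
/* ... later, before reuse ... */
madv.madv = I915_MADV_WILLNEED;
drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
if (!madv.retained) {
	/* backing pages were purged; contents must be regenerated */
}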
 
/* flags */
#define I915_OVERLAY_TYPE_MASK 0xff
#define I915_OVERLAY_YUV_PLANAR 0x01
#define I915_OVERLAY_YUV_PACKED 0x02
#define I915_OVERLAY_RGB 0x03
 
#define I915_OVERLAY_DEPTH_MASK 0xff00
#define I915_OVERLAY_RGB24 0x1000
#define I915_OVERLAY_RGB16 0x2000
#define I915_OVERLAY_RGB15 0x3000
#define I915_OVERLAY_YUV422 0x0100
#define I915_OVERLAY_YUV411 0x0200
#define I915_OVERLAY_YUV420 0x0300
#define I915_OVERLAY_YUV410 0x0400
 
#define I915_OVERLAY_SWAP_MASK 0xff0000
#define I915_OVERLAY_NO_SWAP 0x000000
#define I915_OVERLAY_UV_SWAP 0x010000
#define I915_OVERLAY_Y_SWAP 0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
 
#define I915_OVERLAY_FLAGS_MASK 0xff000000
#define I915_OVERLAY_ENABLE 0x01000000
 
struct drm_intel_overlay_put_image {
/* various flags and src format description */
__u32 flags;
/* source picture description */
__u32 bo_handle;
/* stride values and offsets are in bytes, buffer relative */
__u16 stride_Y; /* stride for packed formats */
__u16 stride_UV;
__u32 offset_Y; /* offset for packed formats */
__u32 offset_U;
__u32 offset_V;
/* in pixels */
__u16 src_width;
__u16 src_height;
/* to compensate the scaling factors for partially covered surfaces */
__u16 src_scan_width;
__u16 src_scan_height;
/* output crtc description */
__u32 crtc_id;
__u16 dst_x;
__u16 dst_y;
__u16 dst_width;
__u16 dst_height;
};
 
/* flags */
#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
struct drm_intel_overlay_attrs {
__u32 flags;
__u32 color_key;
__s32 brightness;
__u32 contrast;
__u32 saturation;
__u32 gamma0;
__u32 gamma1;
__u32 gamma2;
__u32 gamma3;
__u32 gamma4;
__u32 gamma5;
};
 
/*
* Intel sprite handling
*
* Color keying works with a min/mask/max tuple. Both source and destination
* color keying is allowed.
*
* Source keying:
* Sprite pixels within the min & max values, masked against the color channels
* specified in the mask field, will be transparent. All other pixels will
* be displayed on top of the primary plane. For RGB surfaces, only the min
* and mask fields will be used; ranged compares are not allowed.
*
* Destination keying:
* Primary plane pixels that match the min value, masked against the color
* channels specified in the mask field, will be replaced by corresponding
* pixels from the sprite plane.
*
* Note that source & destination keying are exclusive; only one can be
* active on a given plane.
*/
 
#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
#define I915_SET_COLORKEY_DESTINATION (1<<1)
#define I915_SET_COLORKEY_SOURCE (1<<2)
struct drm_intel_sprite_colorkey {
__u32 plane_id;
__u32 min_value;
__u32 channel_mask;
__u32 max_value;
__u32 flags;
};
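
A source-keying sketch per the note above: exact-match green becomes transparent on an RGB sprite (plane and fd assumed; only min and mask are used for RGB):

struct drm_intel_sprite_colorkey key = {
	.plane_id     = plane,
	.min_value    = 0x0000ff00,  /* pure green */
	.max_value    = 0x0000ff00,  /* RGB: ranged compares not allowed */
	.channel_mask = 0x00ffffff,  /* compare all three color channels */
	.flags        = I915_SET_COLORKEY_SOURCE,
};
drmIoctl(fd, DRM_IOCTL_I915_SET_SPRITE_COLORKEY, &key);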
 
struct drm_i915_gem_wait {
/** Handle of BO we shall wait on */
__u32 bo_handle;
__u32 flags;
/** Number of nanoseconds to wait. Returns time remaining. */
__s64 timeout_ns;
};
 
struct drm_i915_gem_context_create {
/* output: id of new context */
__u32 ctx_id;
__u32 pad;
};
 
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
__u32 pad;
};
 
struct drm_i915_reg_read {
__u64 offset;
__u64 val; /* Return value */
};
#endif /* _UAPI_I915_DRM_H_ */
/drivers/include/drm/intel-gtt.h
3,6 → 3,8
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
 
struct agp_bridge_data;
 
const struct intel_gtt {
/* Size of memory reserved for graphics by the BIOS */
unsigned int stolen_size;
15,19 → 17,24
unsigned int needs_dmar : 1;
/* Whether we idle the gpu before mapping/unmapping */
unsigned int do_idle_maps : 1;
/* Share the scratch page dma with ppgtts. */
dma_addr_t scratch_page_dma;
/* for ppgtt PDE access */
u32 __iomem *gtt;
/* needed for ioremap in drm/i915 */
phys_addr_t gma_bus_addr;
} *intel_gtt_get(void);
 
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);
void intel_gmch_remove(void);
 
bool intel_enable_gtt(void);
 
void intel_gtt_chipset_flush(void);
void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
void intel_gtt_insert_sg_entries(struct pagelist *st, unsigned int pg_start,
unsigned int flags);
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
struct scatterlist **sg_list, int *num_sg);
void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
unsigned int sg_len,
unsigned int pg_start,
unsigned int flags);
void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
struct page **pages, unsigned int flags);
 
/* Special gtt memory types */
#define AGP_DCACHE_MEMORY 1
40,4 → 47,8
/* flag for GFDT type */
#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
 
#ifdef CONFIG_INTEL_IOMMU
extern int intel_iommu_gfx_mapped;
#endif
 
#endif
/drivers/include/drm/radeon_drm.h
33,7 → 33,7
#ifndef __RADEON_DRM_H__
#define __RADEON_DRM_H__
 
#include "drm.h"
#include <drm/drm.h>
 
/* WARNING: If you change any of these defines, make sure to change the
* defines in the X server file (radeon_sarea.h)
509,6 → 509,7
#define DRM_RADEON_GEM_SET_TILING 0x28
#define DRM_RADEON_GEM_GET_TILING 0x29
#define DRM_RADEON_GEM_BUSY 0x2a
#define DRM_RADEON_GEM_VA 0x2b
 
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
807,9 → 808,19
#define RADEON_TILING_MICRO 0x2
#define RADEON_TILING_SWAP_16BIT 0x4
#define RADEON_TILING_SWAP_32BIT 0x8
#define RADEON_TILING_SURFACE 0x10 /* this object requires a surface
* when mapped - i.e. front buffer */
/* this object requires a surface when mapped - i.e. front buffer */
#define RADEON_TILING_SURFACE 0x10
#define RADEON_TILING_MICRO_SQUARE 0x20
#define RADEON_TILING_EG_BANKW_SHIFT 8
#define RADEON_TILING_EG_BANKW_MASK 0xf
#define RADEON_TILING_EG_BANKH_SHIFT 12
#define RADEON_TILING_EG_BANKH_MASK 0xf
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT 16
#define RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK 0xf
#define RADEON_TILING_EG_TILE_SPLIT_SHIFT 24
#define RADEON_TILING_EG_TILE_SPLIT_MASK 0xf
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT 28
#define RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK 0xf
 
struct drm_radeon_gem_set_tiling {
uint32_t handle;
897,6 → 908,7
#define RADEON_CHUNK_ID_RELOCS 0x01
#define RADEON_CHUNK_ID_IB 0x02
#define RADEON_CHUNK_ID_FLAGS 0x03
#define RADEON_CHUNK_ID_CONST_IB 0x04
 
/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01
914,7 → 926,6
};
 
/* drm_radeon_cs_reloc.flags */
#define RADEON_RELOC_DONT_SYNC 0x01
 
struct drm_radeon_cs_reloc {
uint32_t handle;
951,6 → 962,10
#define RADEON_INFO_VA_START 0x0e
/* maximum size of ib using the virtual memory cs */
#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
/* max pipes - needed for compute shaders */
#define RADEON_INFO_MAX_PIPES 0x10
/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
#define RADEON_INFO_TIMESTAMP 0x11
 
struct drm_radeon_info {
uint32_t request;
/drivers/include/drm/ttm/ttm_bo_api.h
81,14 → 81,17
*/
 
struct ttm_mem_reg {
struct drm_mm_node *mm_node;
void *mm_node;
unsigned long start;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
uint32_t placement;
// struct ttm_bus_placement bus;
};
 
 
/**
* enum ttm_bo_type
*
/drivers/include/linux/asm/alternative.h
129,7 → 129,7
* use this macro(s) if you need more than one output parameter
* in alternative_io
*/
#define ASM_OUTPUT2(a, b) a, b
#define ASM_OUTPUT2(a) a
 
struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
/drivers/include/linux/asm/atomic_32.h
266,8 → 266,8
u64 __aligned(8) counter;
} atomic64_t;
 
#define ATOMIC64_INIT(val) { (val) }
 
 
extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
 
/**
278,8 → 278,22
* Atomically xchgs the value of @ptr to @new_val and returns
* the old value.
*/
extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
static inline long long atomic64_xchg(atomic64_t *v, long long n)
{
long long o;
unsigned high = (unsigned)(n >> 32);
unsigned low = (unsigned)n;
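/* cmpxchg8b compares EDX:EAX against *v and, on match, stores ECX:EBX;
 * on mismatch it reloads the current value into EDX:EAX ("=&A"(o)), so
 * the loop retries until the swap lands and o holds the old value. */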
 
asm volatile(
"1: \n\t"
"cmpxchg8b (%%esi) \n\t"
"jnz 1b \n\t"
:"=&A" (o)
:"S" (v), "b" (low), "c" (high)
: "memory", "cc");
return o;
}
 
/**
* atomic64_set - set atomic64 variable
* @ptr: pointer to type atomic64_t
287,8 → 301,21
*
* Atomically sets the value of @ptr to @new_val.
*/
extern void atomic64_set(atomic64_t *ptr, u64 new_val);
 
static inline void atomic64_set(atomic64_t *v, long long i)
{
unsigned high = (unsigned)(i >> 32);
unsigned low = (unsigned)i;
asm volatile (
"1: \n\t"
"cmpxchg8b (%%esi) \n\t"
"jnz 1b \n\t"
:
:"S" (v), "b" (low), "c" (high)
: "eax", "edx", "memory", "cc");
}
 
 
/**
* atomic64_read - read atomic64 variable
* @ptr: pointer to type atomic64_t
317,7 → 344,6
return res;
}
 
extern u64 atomic64_read(atomic64_t *ptr);
 
/**
* atomic64_add_return - add and return
/drivers/include/linux/asm/bitops.h
15,6 → 15,8
#include <linux/compiler.h>
#include <asm/alternative.h>
 
#define BIT_64(n) (U64_C(1) << (n))
 
/*
* These have to be done with inline assembly: that way the bit-setting
* is guaranteed to be atomic. All bit operations return 0 if the bit
262,6 → 264,13
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*
* Note: the operation is performed atomically with respect to
* the local CPU, but not other CPUs. Portable code should not
* rely on this behaviour.
* KVM relies on this behaviour on x86 for modifying memory that is also
* accessed from a hypervisor on the same CPU if running in a VM: don't change
* this without also updating arch/x86/kernel/kvm.c
*/
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
309,7 → 318,7
static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
{
return ((1UL << (nr % BITS_PER_LONG)) &
(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
(addr[nr / BITS_PER_LONG])) != 0;
}
 
static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
346,7 → 355,7
*/
static inline unsigned long __ffs(unsigned long word)
{
asm("bsf %1,%0"
asm("rep; bsf %1,%0"
: "=r" (word)
: "rm" (word));
return word;
360,7 → 369,7
*/
static inline unsigned long ffz(unsigned long word)
{
asm("bsf %1,%0"
asm("rep; bsf %1,%0"
: "=r" (word)
: "r" (~word));
return word;
380,6 → 389,8
return word;
}
 
#undef ADDR
 
#ifdef __KERNEL__
/**
* ffs - find first set bit in word
398,7 → 409,7
#ifdef CONFIG_X86_CMOV
asm("bsfl %1,%0\n\t"
"cmovzl %2,%0"
: "=r" (r) : "rm" (x), "r" (-1));
: "=&r" (r) : "rm" (x), "r" (-1));
#else
asm("bsfl %1,%0\n\t"
"jnz 1f\n\t"
/drivers/include/linux/asm/cpufeature.h
6,7 → 6,7
 
#include <asm/required-features.h>
 
#define NCAPINTS 9 /* N 32-bit words worth of info */
#define NCAPINTS 10 /* N 32-bit words worth of info */
 
/*
* Note: If the comment begins with a quoted string, that string is used
89,7 → 89,7
#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
#define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
/* 21 available, was AMD_C1E */
#define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */
97,6 → 97,7
#define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */
#define X86_FEATURE_EAGER_FPU (3*32+29) /* "eagerfpu" Non lazy FPU restore */
 
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */
114,6 → 115,7
#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */
#define X86_FEATURE_PCID (4*32+17) /* Process Context Identifiers */
#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */
120,10 → 122,13
#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */
#define X86_FEATURE_MOVBE (4*32+22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE_TIMER (4*32+24) /* Tsc deadline timer */
#define X86_FEATURE_AES (4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C (4*32+29) /* 16-bit fp conversions */
#define X86_FEATURE_RDRAND (4*32+30) /* The RDRAND instruction */
#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */
 
/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
150,24 → 155,63
#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */
#define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
#define X86_FEATURE_LWP (6*32+15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4 (6*32+16) /* 4 operands MAC instructions */
#define X86_FEATURE_TCE (6*32+17) /* translation cache extension */
#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */
 
/*
* Auxiliary flags: Linux defined - For features scattered in various
* CPUID levels like 0x6, 0xA etc
* CPUID levels like 0x6, 0xA etc, word 7
*/
#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */
#define X86_FEATURE_CPB (7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB (7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */
#define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */
 
/* Virtualization flags: Linux defined */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */
#define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */
#define X86_FEATURE_NPT (8*32+ 5) /* AMD Nested Page Table support */
#define X86_FEATURE_LBRV (8*32+ 6) /* AMD LBR Virtualization support */
#define X86_FEATURE_SVML (8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
#define X86_FEATURE_NRIPS (8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR (8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
#define X86_FEATURE_VMCBCLEAN (8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID (8*32+11) /* AMD flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
#define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
 
 
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
#define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
 
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
#include <linux/bitops.h>
178,8 → 222,7
#define test_cpu_cap(c, bit) \
test_bit(bit, (unsigned long *)((c)->x86_capability))
 
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && \
#define REQUIRED_MASK_BIT_SET(bit) \
( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
(((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
(((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
187,10 → 230,18
(((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
(((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
(((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \
? 1 : \
(((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \
(((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \
(((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )
 
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
test_cpu_cap(c, bit))
 
#define this_cpu_has(bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
x86_this_cpu_test_bit(bit, (unsigned long *)&cpu_info.x86_capability))
 
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
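
When the bit is a compile-time constant covered by REQUIRED_MASK*, cpu_has() folds to 1 and the runtime test_bit() disappears; a sketch (use_sse2_path() is a hypothetical caller):

if (boot_cpu_has(X86_FEATURE_XMM2))
	use_sse2_path();   /* folds to an unconditional call on required-SSE2 builds */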
 
#define set_cpu_cap(c, bit) set_bit(bit, (unsigned long *)((c)->x86_capability))
219,7 → 270,9
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
#define cpu_has_mp boot_cpu_has(X86_FEATURE_MP)
#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
247,8 → 300,14
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT)
#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
 
#if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64)
# define cpu_has_invlpg 1
/drivers/include/linux/asm/div64.h
4,6 → 4,7
#ifdef CONFIG_X86_32
 
#include <linux/types.h>
#include <linux/log2.h>
 
/*
* do_div() is NOT a C function. It wants to return
21,6 → 22,10
({ \
unsigned long __upper, __low, __high, __mod, __base; \
__base = (base); \
if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \
__mod = n & (__base - 1); \
n >>= ilog2(__base); \
} else { \
asm("":"=a" (__low), "=d" (__high) : "A" (n)); \
__upper = __high; \
if (__high) { \
30,6 → 35,7
asm("divl %2":"=a" (__low), "=d" (__mod) \
: "rm" (__base), "0" (__low), "1" (__upper)); \
asm("":"=A" (n) : "a" (__low), "d" (__high)); \
} \
__mod; \
})
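
Usage sketch: do_div() divides the 64-bit lvalue in place and returns the 32-bit remainder, and with the new fast path a constant power-of-2 base becomes a shift and mask:

u64 ns = 3000000123ULL;
u32 rem = do_div(ns, 1000000000);  /* ns == 3, rem == 123 */
u64 bytes = some_len;
u32 off = do_div(bytes, 4096);     /* power of 2: shift/mask, no divl */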
 
/drivers/include/linux/asm/required-features.h
84,5 → 84,7
#define REQUIRED_MASK5 0
#define REQUIRED_MASK6 0
#define REQUIRED_MASK7 0
#define REQUIRED_MASK8 0
#define REQUIRED_MASK9 0
 
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
/drivers/include/linux/backlight.h
--- include/linux/bitops.h (revision 3030)
+++ include/linux/bitops.h (revision 3031)
@@ -26,6 +26,23 @@
(bit) < (size); \
(bit) = find_next_bit((addr), (size), (bit) + 1))
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+#define for_each_clear_bit(bit, addr, size) \
+ for ((bit) = find_first_zero_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_clear_bit() but use bit as value to start with */
+#define for_each_clear_bit_from(bit, addr, size) \
+ for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
static __inline__ int get_bitmask_order(unsigned int count)
{
int order;
@@ -50,6 +67,26 @@
}
/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 rol64(__u64 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (64 - shift));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 ror64(__u64 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (64 - shift));
+}
+
+/**
* rol32 - rotate a 32-bit value left
* @word: value to rotate
* @shift: bits to roll
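
A hedged sketch of the newly added helpers (values illustrative):

unsigned long mask = 0xf0UL;
unsigned int bit;
for_each_clear_bit(bit, &mask, 8) {
	/* visits bits 0..3 */
}
__u64 r = rol64(0x8000000000000001ULL, 1);  /* r == 0x3 */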
/drivers/include/linux/bug.h
0,0 → 1,12
#ifndef _ASM_GENERIC_BUG_H
#define _ASM_GENERIC_BUG_H
 
 
 
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
unlikely(__ret_warn_on); \
})
 
 
#endif
/drivers/include/linux/compiler-gcc.h
83,6 → 83,7
#define __pure __attribute__((pure))
#define __aligned(x) __attribute__((aligned(x)))
#define __printf(a,b) __attribute__((format(printf,a,b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
#define noinline __attribute__((noinline))
#define __attribute_const__ __attribute__((__const__))
#define __maybe_unused __attribute__((unused))
/drivers/include/linux/compiler-gcc4.h
29,6 → 29,7
the kernel context */
#define __cold __attribute__((__cold__))
 
#define __linktime_error(message) __attribute__((__error__(message)))
 
#if __GNUC_MINOR__ >= 5
/*
48,10 → 49,17
#endif
#endif
 
#if __GNUC_MINOR__ >= 6
/*
* Tell the optimizer that something else uses this function or variable.
*/
#define __visible __attribute__((externally_visible))
#endif
 
#if __GNUC_MINOR__ > 0
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
#if __GNUC_MINOR__ >= 4 && !defined(__CHECKER__)
#if __GNUC_MINOR__ >= 3 && !defined(__CHECKER__)
#define __compiletime_warning(message) __attribute__((warning(message)))
#define __compiletime_error(message) __attribute__((error(message)))
#endif
/drivers/include/linux/compiler.h
236,7 → 236,7
 
/*
* Rather then using noinline to prevent stack consumption, use
* noinline_for_stack instead. For documentaiton reasons.
* noinline_for_stack instead. For documentation reasons.
*/
#define noinline_for_stack noinline
 
278,6 → 278,10
# define __section(S) __attribute__ ((__section__(#S)))
#endif
 
#ifndef __visible
#define __visible
#endif
 
/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
293,7 → 297,9
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif
 
#ifndef __linktime_error
# define __linktime_error(message)
#endif
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
/drivers/include/linux/delay.h
--- include/linux/dmapool.h (revision 3030)
+++ include/linux/dmapool.h (revision 3031)
@@ -21,6 +21,12 @@
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
+/*
+ * Managed DMA pool
+ */
+struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
+ size_t size, size_t align, size_t allocation);
+void dmam_pool_destroy(struct dma_pool *pool);
#endif
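
A managed-pool sketch (dev and the dma handle assumed): the pool is released automatically with the device, so no explicit destroy call is needed on the normal path:

struct dma_pool *pool = dmam_pool_create("rx-desc", dev, 64, 64, 0);
dma_addr_t dma;
void *desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);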
/drivers/include/linux/errno-base.h
0,0 → 1,39
#ifndef _ASM_GENERIC_ERRNO_BASE_H
#define _ASM_GENERIC_ERRNO_BASE_H
 
#define EPERM 1 /* Operation not permitted */
#define ENOENT 2 /* No such file or directory */
#define ESRCH 3 /* No such process */
#define EINTR 4 /* Interrupted system call */
#define EIO 5 /* I/O error */
#define ENXIO 6 /* No such device or address */
#define E2BIG 7 /* Argument list too long */
#define ENOEXEC 8 /* Exec format error */
#define EBADF 9 /* Bad file number */
#define ECHILD 10 /* No child processes */
#define EAGAIN 11 /* Try again */
#define ENOMEM 12 /* Out of memory */
#define EACCES 13 /* Permission denied */
#define EFAULT 14 /* Bad address */
#define ENOTBLK 15 /* Block device required */
#define EBUSY 16 /* Device or resource busy */
#define EEXIST 17 /* File exists */
#define EXDEV 18 /* Cross-device link */
#define ENODEV 19 /* No such device */
#define ENOTDIR 20 /* Not a directory */
#define EISDIR 21 /* Is a directory */
#define EINVAL 22 /* Invalid argument */
#define ENFILE 23 /* File table overflow */
#define EMFILE 24 /* Too many open files */
#define ENOTTY 25 /* Not a typewriter */
#define ETXTBSY 26 /* Text file busy */
#define EFBIG 27 /* File too large */
#define ENOSPC 28 /* No space left on device */
#define ESPIPE 29 /* Illegal seek */
#define EROFS 30 /* Read-only file system */
#define EMLINK 31 /* Too many links */
#define EPIPE 32 /* Broken pipe */
#define EDOM 33 /* Math argument out of domain of func */
#define ERANGE 34 /* Math result not representable */
 
#endif
/drivers/include/linux/export.h
0,0 → 1,19
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
/*
* Export symbols from the kernel to modules. Forked from module.h
* to reduce the amount of pointless cruft we feed to gcc when only
* exporting a simple symbol or two.
*
* If you feel the need to add #include <linux/foo.h> to this file
* then you are doing something wrong and should go away silently.
*/
#define EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym)
#define EXPORT_SYMBOL_GPL_FUTURE(sym)
#define EXPORT_UNUSED_SYMBOL(sym)
#define EXPORT_UNUSED_SYMBOL_GPL(sym)
 
#define THIS_MODULE ((struct module *)0)
 
#endif /* _LINUX_EXPORT_H */
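
In this tree the export macros expand to nothing, since drivers link statically against the kernel; a sketch with a hypothetical function showing the source compatibility they preserve:

/* Sketch: compiles to a plain function here, but stays line-compatible
 * with Linux drivers that export their symbols to modules. */
int demo_init(void) /* hypothetical */
{
        return 0;
}
EXPORT_SYMBOL(demo_init);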
/drivers/include/linux/fb.h
549,6 → 549,10
#define FB_EVENT_FB_UNBIND 0x0E
/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
/* A hardware display blank early change occurred */
#define FB_EARLY_EVENT_BLANK 0x10
/* A hardware display blank revert early change occurred */
#define FB_R_EARLY_EVENT_BLANK 0x11
 
struct fb_event {
struct fb_info *info;
599,6 → 603,7
struct mutex lock; /* mutex that protects the page list */
struct list_head pagelist; /* list of touched pages */
/* callback */
void (*first_io)(struct fb_info *info);
void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
};
#endif
990,6 → 995,7
/* drivers/video/fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern int unregister_framebuffer(struct fb_info *fb_info);
extern int unlink_framebuffer(struct fb_info *fb_info);
extern void remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
1112,6 → 1118,7
 
/* drivers/video/fbcmap.c */
extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp);
extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags);
extern void fb_dealloc_cmap(struct fb_cmap *cmap);
extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to);
extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to);
1139,6 → 1146,7
 
extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
extern const struct fb_videomode cea_modes[64];
 
struct fb_modelist {
struct list_head list;
/drivers/include/linux/i2c-algo-bit.h
15,7 → 15,8
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301 USA. */
/* ------------------------------------------------------------------------- */
 
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
49,5 → 50,6
 
int i2c_bit_add_bus(struct i2c_adapter *);
int i2c_bit_add_numbered_bus(struct i2c_adapter *);
extern const struct i2c_algorithm i2c_bit_algo;
 
#endif /* _LINUX_I2C_ALGO_BIT_H */
/drivers/include/linux/i2c.h
17,12 → 17,12
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301 USA. */
/* ------------------------------------------------------------------------- */
 
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
Frodo Looijaard <frodol@dds.nl> */
 
#ifndef _LINUX_I2C_H
#define _LINUX_I2C_H
 
32,6 → 32,8
#include <linux/i2c-id.h>
#include <linux/mod_devicetable.h>
 
extern struct bus_type i2c_bus_type;
extern struct device_type i2c_adapter_type;
 
/* --- General options ------------------------------------------------ */
 
70,7 → 72,7
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver.
*
* For automatic device detection, both @detect and @address_data must
* For automatic device detection, both @detect and @address_list must
* be defined. @class should also be set, otherwise only devices forced
* with module parameters will be created. The detect function must
* fill at least the name field of the i2c_board_info structure it is
271,6 → 273,8
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
/* Must match I2C_M_STOP|IGNORE_NAK */
 
/* i2c adapter classes (bitmask) */
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
/drivers/include/linux/ioport.h
35,8 → 35,9
#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
 
#define IORESOURCE_TYPE_BITS 0x00001f00 /* Resource type */
#define IORESOURCE_IO 0x00000100
#define IORESOURCE_IO 0x00000100 /* PCI/ISA I/O ports */
#define IORESOURCE_MEM 0x00000200
#define IORESOURCE_REG 0x00000300 /* Register offsets */
#define IORESOURCE_IRQ 0x00000400
#define IORESOURCE_DMA 0x00000800
#define IORESOURCE_BUS 0x00001000
/drivers/include/linux/jiffies.h
71,16 → 71,10
/* a value TUSEC for TICK_USEC (can be set by adjtimex) */
#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
 
#define jiffies GetTimerTicks()
 
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
#else
static inline u64 get_jiffies_64(void)
{
return (u64)jiffies;
return (u64)GetTimerTicks();
}
#endif
 
/*
* These inlines deal with timer wrapping correctly. You are
295,7 → 289,13
extern unsigned long timeval_to_jiffies(const struct timeval *value);
extern void jiffies_to_timeval(const unsigned long jiffies,
struct timeval *value);
 
extern clock_t jiffies_to_clock_t(unsigned long x);
static inline clock_t jiffies_delta_to_clock_t(long delta)
{
return jiffies_to_clock_t(max(0L, delta));
}
 
extern unsigned long clock_t_to_jiffies(unsigned long x);
extern u64 jiffies_64_to_clock_t(u64 x);
extern u64 nsec_to_clock_t(u64 x);
/drivers/include/linux/kernel.h
29,6 → 29,7
#define LLONG_MAX ((long long)(~0ULL>>1))
#define LLONG_MIN (-LLONG_MAX - 1)
#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)
 
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
306,7 → 307,10
writel(val >> 32, addr+4);
}
 
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
 
 
#define mmiowb() barrier()
 
#define dev_err(dev, format, arg...) \
329,6 → 333,34
unsigned int dma_length;
};
 
struct sg_table {
struct scatterlist *sgl; /* the list */
unsigned int nents; /* number of mapped entries */
unsigned int orig_nents; /* original size of list */
};
 
#define SG_MAX_SINGLE_ALLOC (4096 / sizeof(struct scatterlist))
 
struct scatterlist *sg_next(struct scatterlist *sg);
 
#define sg_dma_address(sg) ((sg)->dma_address)
#define sg_dma_len(sg) ((sg)->length)
 
#define sg_is_chain(sg) ((sg)->page_link & 0x01)
#define sg_is_last(sg) ((sg)->page_link & 0x02)
#define sg_chain_ptr(sg) \
((struct scatterlist *) ((sg)->page_link & ~0x03))
 
static inline addr_t sg_page(struct scatterlist *sg)
{
return (addr_t)((sg)->page_link & ~0x3);
}
 
#define for_each_sg(sglist, sg, nr, __i) \
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
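
A short sketch using only the helpers declared above; it walks a mapped table and totals the segment lengths (the function name is hypothetical):

/* Sketch: sum the DMA lengths of all mapped entries in a table. */
static unsigned int sg_total_len(struct sg_table *st)
{
        struct scatterlist *sg;
        unsigned int i, total = 0;

        for_each_sg(st->sgl, sg, st->nents, i)
                total += sg_dma_len(sg);

        return total;
}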
 
 
 
struct page
{
unsigned int addr;
347,6 → 379,10
*/
};
 
struct pagelist {
dma_addr_t *page;
unsigned int nents;
};
 
#endif
 
/drivers/include/linux/log2.h
0,0 → 1,208
/* Integer base 2 logarithm calculation
*
* Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
 
#ifndef _LINUX_LOG2_H
#define _LINUX_LOG2_H
 
#include <linux/types.h>
#include <linux/bitops.h>
 
/*
* deal with unrepresentable constant logarithms
*/
extern __attribute__((const, noreturn))
int ____ilog2_NaN(void);
 
/*
* non-constant log of base 2 calculators
* - the arch may override these in asm/bitops.h if they can be implemented
* more efficiently than using fls() and fls64()
* - the arch is not required to handle n==0 if implementing the fallback
*/
#ifndef CONFIG_ARCH_HAS_ILOG2_U32
static inline __attribute__((const))
int __ilog2_u32(u32 n)
{
return fls(n) - 1;
}
#endif
 
#ifndef CONFIG_ARCH_HAS_ILOG2_U64
static inline __attribute__((const))
int __ilog2_u64(u64 n)
{
return fls64(n) - 1;
}
#endif
 
/*
* Determine whether some value is a power of two, where zero is
* *not* considered a power of two.
*/
 
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
 
/*
* round up to nearest power of two
*/
static inline __attribute__((const))
unsigned long __roundup_pow_of_two(unsigned long n)
{
return 1UL << fls_long(n - 1);
}
 
/*
* round down to nearest power of two
*/
static inline __attribute__((const))
unsigned long __rounddown_pow_of_two(unsigned long n)
{
return 1UL << (fls_long(n) - 1);
}
 
/**
 * ilog2 - log base 2 of a 32-bit or 64-bit unsigned value
* @n - parameter
*
* constant-capable log of base 2 calculation
* - this can be used to initialise global variables from constant data, hence
* the massive ternary operator construction
*
* selects the appropriately-sized optimised version depending on sizeof(n)
*/
#define ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
(n) < 1 ? ____ilog2_NaN() : \
(n) & (1ULL << 63) ? 63 : \
(n) & (1ULL << 62) ? 62 : \
(n) & (1ULL << 61) ? 61 : \
(n) & (1ULL << 60) ? 60 : \
(n) & (1ULL << 59) ? 59 : \
(n) & (1ULL << 58) ? 58 : \
(n) & (1ULL << 57) ? 57 : \
(n) & (1ULL << 56) ? 56 : \
(n) & (1ULL << 55) ? 55 : \
(n) & (1ULL << 54) ? 54 : \
(n) & (1ULL << 53) ? 53 : \
(n) & (1ULL << 52) ? 52 : \
(n) & (1ULL << 51) ? 51 : \
(n) & (1ULL << 50) ? 50 : \
(n) & (1ULL << 49) ? 49 : \
(n) & (1ULL << 48) ? 48 : \
(n) & (1ULL << 47) ? 47 : \
(n) & (1ULL << 46) ? 46 : \
(n) & (1ULL << 45) ? 45 : \
(n) & (1ULL << 44) ? 44 : \
(n) & (1ULL << 43) ? 43 : \
(n) & (1ULL << 42) ? 42 : \
(n) & (1ULL << 41) ? 41 : \
(n) & (1ULL << 40) ? 40 : \
(n) & (1ULL << 39) ? 39 : \
(n) & (1ULL << 38) ? 38 : \
(n) & (1ULL << 37) ? 37 : \
(n) & (1ULL << 36) ? 36 : \
(n) & (1ULL << 35) ? 35 : \
(n) & (1ULL << 34) ? 34 : \
(n) & (1ULL << 33) ? 33 : \
(n) & (1ULL << 32) ? 32 : \
(n) & (1ULL << 31) ? 31 : \
(n) & (1ULL << 30) ? 30 : \
(n) & (1ULL << 29) ? 29 : \
(n) & (1ULL << 28) ? 28 : \
(n) & (1ULL << 27) ? 27 : \
(n) & (1ULL << 26) ? 26 : \
(n) & (1ULL << 25) ? 25 : \
(n) & (1ULL << 24) ? 24 : \
(n) & (1ULL << 23) ? 23 : \
(n) & (1ULL << 22) ? 22 : \
(n) & (1ULL << 21) ? 21 : \
(n) & (1ULL << 20) ? 20 : \
(n) & (1ULL << 19) ? 19 : \
(n) & (1ULL << 18) ? 18 : \
(n) & (1ULL << 17) ? 17 : \
(n) & (1ULL << 16) ? 16 : \
(n) & (1ULL << 15) ? 15 : \
(n) & (1ULL << 14) ? 14 : \
(n) & (1ULL << 13) ? 13 : \
(n) & (1ULL << 12) ? 12 : \
(n) & (1ULL << 11) ? 11 : \
(n) & (1ULL << 10) ? 10 : \
(n) & (1ULL << 9) ? 9 : \
(n) & (1ULL << 8) ? 8 : \
(n) & (1ULL << 7) ? 7 : \
(n) & (1ULL << 6) ? 6 : \
(n) & (1ULL << 5) ? 5 : \
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
(n) & (1ULL << 1) ? 1 : \
(n) & (1ULL << 0) ? 0 : \
____ilog2_NaN() \
) : \
(sizeof(n) <= 4) ? \
__ilog2_u32(n) : \
__ilog2_u64(n) \
)
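
A minimal sketch of the constant-capable property described above: for a compile-time constant the ternary ladder folds to an integer literal, while runtime values fall through to the fls()-based helpers.

/* Sketch: 4096 is 2^12, so RING_ORDER folds to 12 at compile time. */
#define RING_ORDER ilog2(4096) /* hypothetical constant */

static inline int runtime_order(unsigned long n)
{
        return ilog2(n); /* dispatches to __ilog2_u32()/__ilog2_u64() */
}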
 
/**
* roundup_pow_of_two - round the given value up to nearest power of two
* @n - parameter
*
* round the given value up to the nearest power of two
* - the result is undefined when n == 0
* - this can be used to initialise global variables from constant data
*/
#define roundup_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
(n == 1) ? 1 : \
(1UL << (ilog2((n) - 1) + 1)) \
) : \
__roundup_pow_of_two(n) \
)
 
/**
* rounddown_pow_of_two - round the given value down to nearest power of two
* @n - parameter
*
* round the given value down to the nearest power of two
* - the result is undefined when n == 0
* - this can be used to initialise global variables from constant data
*/
#define rounddown_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
(1UL << ilog2(n))) : \
__rounddown_pow_of_two(n) \
)
 
/**
* order_base_2 - calculate the (rounded up) base 2 order of the argument
* @n: parameter
*
* The first few values calculated by this routine:
* ob2(0) = 0
* ob2(1) = 0
* ob2(2) = 1
* ob2(3) = 2
* ob2(4) = 2
* ob2(5) = 3
* ... and so on.
*/
 
#define order_base_2(n) ilog2(roundup_pow_of_two(n))
 
#endif /* _LINUX_LOG2_H */
/drivers/include/linux/math64.h
0,0 → 1,121
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H
 
#include <linux/types.h>
#include <asm/div64.h>
 
#if BITS_PER_LONG == 64
 
#define div64_long(x,y) div64_s64((x),(y))
 
/**
* div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
*
 * 32bit archs commonly provide this themselves as an optimized 64bit
 * divide.
*/
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
*remainder = dividend % divisor;
return dividend / divisor;
}
 
/**
* div_s64_rem - signed 64bit divide with 32bit divisor with remainder
*/
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
*remainder = dividend % divisor;
return dividend / divisor;
}
 
/**
* div64_u64 - unsigned 64bit divide with 64bit divisor
*/
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
return dividend / divisor;
}
 
/**
* div64_s64 - signed 64bit divide with 64bit divisor
*/
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
return dividend / divisor;
}
 
#elif BITS_PER_LONG == 32
 
#define div64_long(x,y) div_s64((x),(y))
 
#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
*remainder = do_div(dividend, divisor);
return dividend;
}
#endif
 
#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif
 
#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif
 
#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif
 
#endif /* BITS_PER_LONG */
 
/**
* div_u64 - unsigned 64bit divide with 32bit divisor
*
* This is the most common 64bit divide and should be used if possible,
* as many 32bit archs can optimize this variant better than a full 64bit
* divide.
*/
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
u32 remainder;
return div_u64_rem(dividend, divisor, &remainder);
}
#endif
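
A hedged sketch of the preferred form: on 32-bit builds a plain 64/32 division would drag in libgcc's __udivdi3, whereas div_u64() goes through do_div(). The helper name is hypothetical.

/* Sketch: convert nanoseconds to milliseconds without a 64/64 divide. */
static inline u64 ns_to_ms(u64 ns)
{
        return div_u64(ns, 1000000); /* NSEC_PER_MSEC */
}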
 
/**
* div_s64 - signed 64bit divide with 32bit divisor
*/
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
s32 remainder;
return div_s64_rem(dividend, divisor, &remainder);
}
#endif
 
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
 
static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
u32 ret = 0;
 
while (dividend >= divisor) {
/* The following asm() prevents the compiler from
optimising this loop into a modulo operation. */
asm("" : "+rm"(dividend));
 
dividend -= divisor;
ret++;
}
 
*remainder = dividend;
 
return ret;
}
 
#endif /* _LINUX_MATH64_H */
/drivers/include/linux/mod_devicetable.h
78,6 → 78,9
* of a given interface; other interfaces may support other classes.
* @bInterfaceSubClass: Subclass of interface; associated with bInterfaceClass.
* @bInterfaceProtocol: Protocol of interface; associated with bInterfaceClass.
* @bInterfaceNumber: Number of interface; composite devices may use
* fixed interface numbers to differentiate between vendor-specific
* interfaces.
* @driver_info: Holds information used by the driver. Usually it holds
* a pointer to a descriptor understood by the driver, or perhaps
* device flags.
130,12 → 133,15
#define USB_DEVICE_ID_MATCH_INT_CLASS 0x0080
#define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
#define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
#define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
 
#define HID_ANY_ID (~0)
#define HID_BUS_ANY 0xffff
#define HID_GROUP_ANY 0x0000
 
struct hid_device_id {
__u16 bus;
__u16 pad1;
__u16 group;
__u32 vendor;
__u32 product;
kernel_ulong_t driver_data;
222,7 → 228,7
char type[32];
char compatible[128];
#ifdef __KERNEL__
void *data;
const void *data;
#else
kernel_ulong_t data;
#endif
/drivers/include/linux/module.h
11,15 → 11,14
#include <linux/kernel.h>
 
 
#define EXPORT_SYMBOL(x)
 
#define MODULE_FIRMWARE(x)
#define MODULE_AUTHOR(x);
#define MODULE_DESCRIPTION(x);
#define MODULE_LICENSE(x);
 
#define MODULE_PARM_DESC(_parm, desc)
 
#define MODULE_AUTHOR(x)
#define MODULE_DESCRIPTION(x)
#define MODULE_LICENSE(x)
 
struct module {};
 
#endif /* _LINUX_MODULE_H */
/drivers/include/linux/moduleparam.h
0,0 → 1,3
 
#define MODULE_PARM_DESC(_parm, desc)
#define module_param_named(name, value, type, perm)
/drivers/include/linux/pci.h
13,11 → 13,10
* PCI to PCI Bridge Specification
* PCI System Design Guide
*/
 
#ifndef LINUX_PCI_H
#define LINUX_PCI_H
 
#include <types.h>
#include <linux/types.h>
#include <list.h>
#include <linux/pci_regs.h> /* The pci register defines */
#include <ioport.h>
276,6 → 275,20
#define PCI_D3cold ((pci_power_t __force) 4)
#define PCI_UNKNOWN ((pci_power_t __force) 5)
#define PCI_POWER_ERROR ((pci_power_t __force) -1)
 
/* Remember to update this when the list above changes! */
extern const char *pci_power_names[];
 
static inline const char *pci_power_name(pci_power_t state)
{
return pci_power_names[1 + (int) state];
}
 
#define PCI_PM_D2_DELAY 200
#define PCI_PM_D3_WAIT 10
#define PCI_PM_D3COLD_WAIT 100
#define PCI_PM_BUS_WAIT 50
 
/** The pci_channel state describes connectivity between the CPU and
* the pci device. If some PCI bus between here and the pci device
* has crashed or locked up, this info is reflected here.
346,9 → 359,10
u8 revision; /* PCI revision, low byte of class word */
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
u8 pcie_cap; /* PCI-E capability offset */
u8 pcie_type; /* PCI-E device/port type */
u8 pcie_mpss:3; /* PCI-E Max Payload Size Supported */
u8 rom_base_reg; /* which config register controls the ROM */
u8 pin; /* which interrupt pin this device uses */
u16 pcie_flags_reg; /* cached PCI-E Capabilities Register */
 
// struct pci_driver *driver; /* which driver has allocated this device */
uint64_t dma_mask; /* Mask of the bits of bus address this
367,14 → 381,25
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
unsigned int pme_interrupt:1;
unsigned int pme_poll:1; /* Poll device's PME status bit */
unsigned int d1_support:1; /* Low power state D1 is supported */
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* Only allow D0 and D3 */
unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
unsigned int no_d3cold:1; /* D3cold is forbidden */
unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
unsigned int mmio_always_on:1; /* disallow turning off io/mem
decoding during bar sizing */
unsigned int wakeup_prepared:1;
unsigned int runtime_d3cold:1; /* whether go through runtime
D3cold, not set for devices
powered on/off by the
corresponding bridge */
unsigned int d3_delay; /* D3->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
 
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */
#endif
 
pci_channel_state_t error_state; /* current connectivity state */
struct device dev; /* Generic device interface */
387,7 → 412,6
*/
unsigned int irq;
struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
resource_size_t fw_addr[DEVICE_COUNT_RESOURCE]; /* FW-assigned addr */
 
/* These fields are used by common fixups */
unsigned int transparent:1; /* Transparent PCI bridge */
396,7 → 420,7
unsigned int is_added:1;
unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */
unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
unsigned int block_cfg_access:1; /* config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
unsigned int msi_enabled:1;
411,15 → 435,15
unsigned int is_virtfn:1;
unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
unsigned int __aer_firmware_first_valid:1;
unsigned int __aer_firmware_first:1;
unsigned int broken_intx_masking:1;
unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
// pci_dev_flags_t dev_flags;
// atomic_t enable_cnt; /* pci_enable_device has been called */
atomic_t enable_cnt; /* pci_enable_device has been called */
 
// u32 saved_config_space[16]; /* config space saved at suspend time */
// struct hlist_head saved_cap_space;
// struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */
// int rom_attr_enabled; /* has display of the rom attribute been enabled? */
// struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
// struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
 
 
};
 
#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start)
443,6 → 467,7
struct list_head slots; /* list of slots on this bus */
struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
struct list_head resources; /* address space routed to this bus */
struct resource busn_res; /* bus numbers routed to this bus */
 
struct pci_ops *ops; /* configuration access functions */
void *sysdata; /* hook for sys-specific extension */
450,8 → 475,6
 
unsigned char number; /* bus number */
unsigned char primary; /* number of primary bridge */
unsigned char secondary; /* number of secondary bridge */
unsigned char subordinate; /* max number of subordinate buses */
unsigned char max_bus_speed; /* enum pci_bus_speed */
unsigned char cur_bus_speed; /* enum pci_bus_speed */
 
571,6 → 594,16
return !!pci_pcie_cap(dev);
}
 
/**
* pci_pcie_type - get the PCIe device/port type
* @dev: PCI device
*/
static inline int pci_pcie_type(const struct pci_dev *dev)
{
return (dev->pcie_flags_reg & PCI_EXP_FLAGS_TYPE) >> 4;
}
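
A small sketch of the intended use, assuming PCI_EXP_TYPE_ROOT_PORT from pci_regs.h; because pcie_flags_reg is cached, no config-space read is needed per query.

/* Sketch (assumes PCI_EXP_TYPE_ROOT_PORT from pci_regs.h). */
static inline int pci_dev_is_root_port(const struct pci_dev *dev)
{
        return pci_is_pcie(dev) &&
               pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT;
}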
 
 
static inline int pci_iov_init(struct pci_dev *dev)
{
return -ENODEV;
/drivers/include/linux/pci_regs.h
26,6 → 26,7
* Under PCI, each device has 256 bytes of configuration address space,
* of which the first 64 bytes are standardized as follows:
*/
#define PCI_STD_HEADER_SIZEOF 64
#define PCI_VENDOR_ID 0x00 /* 16 bits */
#define PCI_DEVICE_ID 0x02 /* 16 bits */
#define PCI_COMMAND 0x04 /* 16 bits */
125,7 → 126,8
#define PCI_IO_RANGE_TYPE_MASK 0x0fUL /* I/O bridging type */
#define PCI_IO_RANGE_TYPE_16 0x00
#define PCI_IO_RANGE_TYPE_32 0x01
#define PCI_IO_RANGE_MASK (~0x0fUL)
#define PCI_IO_RANGE_MASK (~0x0fUL) /* Standard 4K I/O windows */
#define PCI_IO_1K_RANGE_MASK (~0x03UL) /* Intel 1K I/O windows */
#define PCI_SEC_STATUS 0x1e /* Secondary status register, only bit 14 used */
#define PCI_MEMORY_BASE 0x20 /* Memory range behind */
#define PCI_MEMORY_LIMIT 0x22
209,9 → 211,12
#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
#define PCI_CAP_ID_SSVID 0x0D /* Bridge subsystem vendor/device ID */
#define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */
#define PCI_CAP_ID_SECDEV 0x0F /* Secure Device */
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
#define PCI_CAP_ID_SATA 0x12 /* SATA Data/Index Conf. */
#define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */
#define PCI_CAP_ID_MAX PCI_CAP_ID_AF
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */
#define PCI_CAP_SIZEOF 4
276,6 → 281,7
#define PCI_VPD_ADDR_MASK 0x7fff /* Address mask */
#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */
#define PCI_VPD_DATA 4 /* 32-bits of data returned here */
#define PCI_CAP_VPD_SIZEOF 8
 
/* Slot Identification */
 
297,8 → 303,10
#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
#define PCI_MSI_PENDING_32 16 /* Pending intrs for 32-bit devices */
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
#define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */
 
/* MSI-X registers */
#define PCI_MSIX_FLAGS 2
308,6 → 316,7
#define PCI_MSIX_TABLE 4
#define PCI_MSIX_PBA 8
#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
 
/* MSI-X entry's format */
#define PCI_MSIX_ENTRY_SIZE 16
338,6 → 347,7
#define PCI_AF_CTRL_FLR 0x01
#define PCI_AF_STATUS 5
#define PCI_AF_STATUS_TP 0x01
#define PCI_CAP_AF_SIZEOF 6 /* size of AF registers */
 
/* PCI-X registers */
 
374,6 → 384,10
#define PCI_X_STATUS_SPL_ERR 0x20000000 /* Rcvd Split Completion Error Msg */
#define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */
#define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */
#define PCI_X_ECC_CSR 8 /* ECC control and status */
#define PCI_CAP_PCIX_SIZEOF_V0 8 /* size of registers for Version 0 */
#define PCI_CAP_PCIX_SIZEOF_V1 24 /* size for Version 1 */
#define PCI_CAP_PCIX_SIZEOF_V2 PCI_CAP_PCIX_SIZEOF_V1 /* Same for v2 */
 
/* PCI Bridge Subsystem ID registers */
 
391,8 → 405,9
#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
#define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIE Bridge */
#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
#define PCI_EXP_TYPE_RC_EC 0x10 /* Root Complex Event Collector */
#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
#define PCI_EXP_DEVCAP 4 /* Device capabilities */
461,6 → 476,7
#define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */
#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */
#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints end here */
#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */
#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */
506,6 → 522,12
#define PCI_EXP_RTSTA 32 /* Root Status */
#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
/*
* Note that the following PCI Express 'Capability Structure' registers
* were introduced with 'Capability Version' 0x2 (v2). These registers
* do not exist on devices with Capability Version 1. Use pci_pcie_cap2()
* to use these fields safely.
*/
#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */
#define PCI_EXP_DEVCAP2_LTR 0x800 /* Latency tolerance reporting */
520,7 → 542,14
#define PCI_EXP_OBFF_MSGA_EN 0x2000 /* OBFF enable with Message type A */
#define PCI_EXP_OBFF_MSGB_EN 0x4000 /* OBFF enable with Message type B */
#define PCI_EXP_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints end here */
#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x01 /* Current Link Speed 2.5GT/s */
#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x02 /* Current Link Speed 5.0GT/s */
#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x04 /* Current Link Speed 8.0GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
 
/* Extended Capabilities (PCI-X 2.0 and Express) */
528,21 → 557,43
#define PCI_EXT_CAP_VER(header) ((header >> 16) & 0xf)
#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc)
 
#define PCI_EXT_CAP_ID_ERR 1
#define PCI_EXT_CAP_ID_VC 2
#define PCI_EXT_CAP_ID_DSN 3
#define PCI_EXT_CAP_ID_PWR 4
#define PCI_EXT_CAP_ID_VNDR 11
#define PCI_EXT_CAP_ID_ACS 13
#define PCI_EXT_CAP_ID_ARI 14
#define PCI_EXT_CAP_ID_ATS 15
#define PCI_EXT_CAP_ID_SRIOV 16
#define PCI_EXT_CAP_ID_LTR 24
#define PCI_EXT_CAP_ID_ERR 0x01 /* Advanced Error Reporting */
#define PCI_EXT_CAP_ID_VC 0x02 /* Virtual Channel Capability */
#define PCI_EXT_CAP_ID_DSN 0x03 /* Device Serial Number */
#define PCI_EXT_CAP_ID_PWR 0x04 /* Power Budgeting */
#define PCI_EXT_CAP_ID_RCLD 0x05 /* Root Complex Link Declaration */
#define PCI_EXT_CAP_ID_RCILC 0x06 /* Root Complex Internal Link Control */
#define PCI_EXT_CAP_ID_RCEC 0x07 /* Root Complex Event Collector */
#define PCI_EXT_CAP_ID_MFVC 0x08 /* Multi-Function VC Capability */
#define PCI_EXT_CAP_ID_VC9 0x09 /* same as _VC */
#define PCI_EXT_CAP_ID_RCRB 0x0A /* Root Complex RB? */
#define PCI_EXT_CAP_ID_VNDR 0x0B /* Vendor Specific */
#define PCI_EXT_CAP_ID_CAC 0x0C /* Config Access - obsolete */
#define PCI_EXT_CAP_ID_ACS 0x0D /* Access Control Services */
#define PCI_EXT_CAP_ID_ARI 0x0E /* Alternate Routing ID */
#define PCI_EXT_CAP_ID_ATS 0x0F /* Address Translation Services */
#define PCI_EXT_CAP_ID_SRIOV 0x10 /* Single Root I/O Virtualization */
#define PCI_EXT_CAP_ID_MRIOV 0x11 /* Multi Root I/O Virtualization */
#define PCI_EXT_CAP_ID_MCAST 0x12 /* Multicast */
#define PCI_EXT_CAP_ID_PRI 0x13 /* Page Request Interface */
#define PCI_EXT_CAP_ID_AMD_XXX 0x14 /* reserved for AMD */
#define PCI_EXT_CAP_ID_REBAR 0x15 /* resizable BAR */
#define PCI_EXT_CAP_ID_DPA 0x16 /* dynamic power alloc */
#define PCI_EXT_CAP_ID_TPH 0x17 /* TPH request */
#define PCI_EXT_CAP_ID_LTR 0x18 /* latency tolerance reporting */
#define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe */
#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */
#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */
#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PASID
 
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
 
/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
#define PCI_ERR_UNC_TRAIN 0x00000001 /* Training */
#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */
#define PCI_ERR_UNC_FCP 0x00002000 /* Flow Control Protocol */
#define PCI_ERR_UNC_COMP_TIME 0x00004000 /* Completion Timeout */
552,6 → 603,11
#define PCI_ERR_UNC_MALF_TLP 0x00040000 /* Malformed TLP */
#define PCI_ERR_UNC_ECRC 0x00080000 /* ECRC Error Status */
#define PCI_ERR_UNC_UNSUP 0x00100000 /* Unsupported Request */
#define PCI_ERR_UNC_ACSV 0x00200000 /* ACS Violation */
#define PCI_ERR_UNC_INTN 0x00400000 /* internal error */
#define PCI_ERR_UNC_MCBTLP 0x00800000 /* MC blocked TLP */
#define PCI_ERR_UNC_ATOMEG 0x01000000 /* Atomic egress blocked */
#define PCI_ERR_UNC_TLPPRE 0x02000000 /* TLP prefix blocked */
#define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */
/* Same bits as above */
#define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */
562,6 → 618,9
#define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */
#define PCI_ERR_COR_REP_ROLL 0x00000100 /* REPLAY_NUM Rollover */
#define PCI_ERR_COR_REP_TIMER 0x00001000 /* Replay Timer Timeout */
#define PCI_ERR_COR_ADV_NFAT 0x00002000 /* Advisory Non-Fatal */
#define PCI_ERR_COR_INTERNAL 0x00004000 /* Corrected Internal */
#define PCI_ERR_COR_LOG_OVER 0x00008000 /* Header Log Overflow */
#define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */
/* Same bits as above */
#define PCI_ERR_CAP 24 /* Advanced Error Capabilities */
593,12 → 652,18
 
/* Virtual Channel */
#define PCI_VC_PORT_REG1 4
#define PCI_VC_REG1_EVCC 0x7 /* extended vc count */
#define PCI_VC_PORT_REG2 8
#define PCI_VC_REG2_32_PHASE 0x2
#define PCI_VC_REG2_64_PHASE 0x4
#define PCI_VC_REG2_128_PHASE 0x8
#define PCI_VC_PORT_CTRL 12
#define PCI_VC_PORT_STATUS 14
#define PCI_VC_RES_CAP 16
#define PCI_VC_RES_CTRL 20
#define PCI_VC_RES_STATUS 26
#define PCI_CAP_VC_BASE_SIZEOF 0x10
#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C
 
/* Power Budgeting */
#define PCI_PWR_DSR 4 /* Data Select Register */
611,7 → 676,14
#define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */
#define PCI_PWR_CAP 12 /* Capability */
#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */
#define PCI_EXT_CAP_PWR_SIZEOF 16
 
/* Vendor-Specific (VSEC, PCI_EXT_CAP_ID_VNDR) */
#define PCI_VNDR_HEADER 4 /* Vendor-Specific Header */
#define PCI_VNDR_HEADER_ID(x) ((x) & 0xffff)
#define PCI_VNDR_HEADER_REV(x) (((x) >> 16) & 0xf)
#define PCI_VNDR_HEADER_LEN(x) (((x) >> 20) & 0xfff)
 
/*
* Hypertransport sub capability types
*
643,6 → 715,8
#define HT_CAPTYPE_ERROR_RETRY 0xC0 /* Retry on error configuration */
#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 hypertransport configuration */
#define HT_CAPTYPE_PM 0xE0 /* Hypertransport powermanagement configuration */
#define HT_CAP_SIZEOF_LONG 28 /* slave & primary */
#define HT_CAP_SIZEOF_SHORT 24 /* host & secondary */
 
/* Alternative Routing-ID Interpretation */
#define PCI_ARI_CAP 0x04 /* ARI Capability Register */
653,6 → 727,7
#define PCI_ARI_CTRL_MFVC 0x0001 /* MFVC Function Groups Enable */
#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
#define PCI_EXT_CAP_ARI_SIZEOF 8
 
/* Address Translation Service */
#define PCI_ATS_CAP 0x04 /* ATS Capability Register */
662,26 → 737,29
#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */
#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */
#define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */
#define PCI_EXT_CAP_ATS_SIZEOF 8
 
/* Page Request Interface */
#define PCI_PRI_CAP 0x13 /* PRI capability ID */
#define PCI_PRI_CONTROL_OFF 0x04 /* Offset of control register */
#define PCI_PRI_STATUS_OFF 0x06 /* Offset of status register */
#define PCI_PRI_ENABLE 0x0001 /* Enable mask */
#define PCI_PRI_RESET 0x0002 /* Reset bit mask */
#define PCI_PRI_STATUS_RF 0x0001 /* Request Failure */
#define PCI_PRI_STATUS_UPRGI 0x0002 /* Unexpected PRG index */
#define PCI_PRI_STATUS_STOPPED 0x0100 /* PRI Stopped */
#define PCI_PRI_MAX_REQ_OFF 0x08 /* Cap offset for max reqs supported */
#define PCI_PRI_ALLOC_REQ_OFF 0x0c /* Cap offset for max reqs allowed */
#define PCI_PRI_CTRL 0x04 /* PRI control register */
#define PCI_PRI_CTRL_ENABLE 0x01 /* Enable */
#define PCI_PRI_CTRL_RESET 0x02 /* Reset */
#define PCI_PRI_STATUS 0x06 /* PRI status register */
#define PCI_PRI_STATUS_RF 0x001 /* Response Failure */
#define PCI_PRI_STATUS_UPRGI 0x002 /* Unexpected PRG index */
#define PCI_PRI_STATUS_STOPPED 0x100 /* PRI Stopped */
#define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */
#define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */
#define PCI_EXT_CAP_PRI_SIZEOF 16
 
/* PASID capability */
#define PCI_PASID_CAP 0x1b /* PASID capability ID */
#define PCI_PASID_CAP_OFF 0x04 /* PASID feature register */
#define PCI_PASID_CONTROL_OFF 0x06 /* PASID control register */
#define PCI_PASID_ENABLE 0x01 /* Enable/Supported bit */
#define PCI_PASID_EXEC 0x02 /* Exec permissions Enable/Supported */
#define PCI_PASID_PRIV 0x04 /* Privilege Mode Enable/Support */
#define PCI_PASID_CAP 0x04 /* PASID feature register */
#define PCI_PASID_CAP_EXEC 0x02 /* Exec permissions Supported */
#define PCI_PASID_CAP_PRIV 0x04 /* Privilege Mode Supported */
#define PCI_PASID_CTRL 0x06 /* PASID control register */
#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
#define PCI_PASID_CTRL_PRIV 0x04 /* Privilege Mode Enable */
#define PCI_EXT_CAP_PASID_SIZEOF 8
 
/* Single Root I/O Virtualization */
#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
713,6 → 791,7
#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
#define PCI_EXT_CAP_SRIOV_SIZEOF 64
 
#define PCI_LTR_MAX_SNOOP_LAT 0x4
#define PCI_LTR_MAX_NOSNOOP_LAT 0x6
719,6 → 798,7
#define PCI_LTR_VALUE_MASK 0x000003ff
#define PCI_LTR_SCALE_MASK 0x00001c00
#define PCI_LTR_SCALE_SHIFT 10
#define PCI_EXT_CAP_LTR_SIZEOF 8
 
/* Access Control Service */
#define PCI_ACS_CAP 0x04 /* ACS Capability Register */
729,7 → 809,38
#define PCI_ACS_UF 0x10 /* Upstream Forwarding */
#define PCI_ACS_EC 0x20 /* P2P Egress Control */
#define PCI_ACS_DT 0x40 /* Direct Translated P2P */
#define PCI_ACS_EGRESS_BITS 0x05 /* ACS Egress Control Vector Size */
#define PCI_ACS_CTRL 0x06 /* ACS Control Register */
#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
 
#define PCI_VSEC_HDR 4 /* extended cap - vendor specific */
#define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */
 
/* sata capability */
#define PCI_SATA_REGS 4 /* SATA REGs specifier */
#define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */
#define PCI_SATA_REGS_INLINE 0xF /* REGS in config space */
#define PCI_SATA_SIZEOF_SHORT 8
#define PCI_SATA_SIZEOF_LONG 16
 
/* resizable BARs */
#define PCI_REBAR_CTRL 8 /* control register */
#define PCI_REBAR_CTRL_NBAR_MASK (7 << 5) /* mask for # bars */
#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # bars */
 
/* dynamic power allocation */
#define PCI_DPA_CAP 4 /* capability register */
#define PCI_DPA_CAP_SUBSTATE_MASK 0x1F /* # substates - 1 */
#define PCI_DPA_BASE_SIZEOF 16 /* size with 0 substates */
 
/* TPH Requester */
#define PCI_TPH_CAP 4 /* capability register */
#define PCI_TPH_CAP_LOC_MASK 0x600 /* location mask */
#define PCI_TPH_LOC_NONE 0x000 /* no location */
#define PCI_TPH_LOC_CAP 0x200 /* in capability */
#define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */
#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* st table mask */
#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */
#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */
 
#endif /* LINUX_PCI_REGS_H */
/drivers/include/linux/poison.h
40,12 → 40,6
#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */
 
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define MEMBLOCK_INACTIVE 0x3a84fb0144c9e71bULL
#else
#define MEMBLOCK_INACTIVE 0x44c9e71bUL
#endif
 
#define SLUB_RED_INACTIVE 0xbb
#define SLUB_RED_ACTIVE 0xcc
 
/drivers/include/linux/spinlock.h
344,4 → 344,10
# include <linux/spinlock_api_up.h>
#endif
 
struct rw_semaphore {
signed long count;
spinlock_t wait_lock;
struct list_head wait_list;
};
 
#endif /* __LINUX_SPINLOCK_H */
/drivers/include/linux/spinlock_api_up.h
31,7 → 31,7
do { local_bh_disable(); __LOCK(lock); } while (0)
 
#define __LOCK_IRQ(lock) \
do { local_irq_disable(); __LOCK(lock); } while (0)
do { asm volatile ("cli \n"); __LOCK(lock); } while (0)
 
#define __LOCK_IRQSAVE(lock, flags) \
do { \
51,7 → 51,7
__release(lock); (void)(lock); } while (0)
 
#define __UNLOCK_IRQ(lock) \
do { local_irq_enable(); __UNLOCK(lock); } while (0)
do { asm volatile ("sti \n"); __UNLOCK(lock); } while (0)
 
#define __UNLOCK_IRQRESTORE(lock, flags) \
do { \
/drivers/include/linux/types.h
24,7 → 24,8
typedef __kernel_dev_t dev_t;
typedef __kernel_ino_t ino_t;
typedef __kernel_mode_t mode_t;
typedef __kernel_nlink_t nlink_t;
typedef unsigned short umode_t;
typedef __u32 nlink_t;
typedef __kernel_off_t off_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_daddr_t daddr_t;
252,9 → 253,7
typedef unsigned int addr_t;
typedef unsigned int count_t;
 
# define WARN(condition, format...)
 
 
#define false 0
#define true 1
 
267,14 → 266,6
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 
 
 
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
 
#define DRM_INFO(fmt, arg...) dbgprintf("DRM: "fmt , ##arg)
 
#define DRM_ERROR(fmt, arg...) \
printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)
 
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
 
 
345,24 → 336,7
#define PAGE_MASK (~(PAGE_SIZE-1))
 
 
#define do_div(n, base) \
({ \
unsigned long __upper, __low, __high, __mod, __base; \
__base = (base); \
asm("":"=a" (__low), "=d" (__high) : "A" (n)); \
__upper = __high; \
if (__high) { \
__upper = __high % (__base); \
__high = __high / (__base); \
} \
asm("divl %2":"=a" (__low), "=d" (__mod) \
: "rm" (__base), "0" (__low), "1" (__upper)); \
asm("":"=A" (n) : "a" (__low), "d" (__high)); \
__mod; \
})
 
 
 
#define ENTER() dbgprintf("enter %s\n",__FUNCTION__)
#define LEAVE() dbgprintf("leave %s\n",__FUNCTION__)
 
375,4 → 349,9
 
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159
 
#ifndef __read_mostly
#define __read_mostly
#endif
 
 
#endif /* _LINUX_TYPES_H */
/drivers/include/linux/wait.h
36,6 → 36,40
} while (0)
 
 
 
 
#define wait_event_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
do{ \
wait_queue_t __wait = { \
.task_list = LIST_HEAD_INIT(__wait.task_list), \
.evnt = CreateEvent(NULL, MANUAL_DESTROY), \
}; \
u32 flags; \
\
spin_lock_irqsave(&wq.lock, flags); \
if (list_empty(&__wait.task_list)) \
__add_wait_queue(&wq, &__wait); \
spin_unlock_irqrestore(&wq.lock, flags); \
\
for(;;){ \
if (condition) \
break; \
WaitEvent(__wait.evnt); \
}; \
if (!list_empty_careful(&__wait.task_list)) { \
spin_lock_irqsave(&wq.lock, flags); \
list_del_init(&__wait.task_list); \
spin_unlock_irqrestore(&wq.lock, flags); \
}; \
DestroyEvent(__wait.evnt); \
} while (0); \
__ret; \
})
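
A hedged usage sketch with hypothetical names; note that this port's macro returns the timeout argument unchanged rather than the jiffies remaining, so callers should not use the return value for elapsed-time accounting.

/* Sketch: block until an ISR sets a flag and wakes the queue. */
static void wait_for_irq(wait_queue_head_t *q, volatile int *done)
{
        wait_event_timeout(*q, *done != 0, HZ);
}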
 
 
 
#define wait_event(wq, condition) \
do{ \
wait_queue_t __wait = { \
63,6 → 97,8
} while (0)
 
 
 
 
static inline
void wake_up_all(wait_queue_head_t *q)
{
127,10 → 163,13
struct work_struct work;
};
 
 
struct workqueue_struct *alloc_workqueue_key(const char *fmt,
unsigned int flags, int max_active);
 
 
#define alloc_ordered_workqueue(fmt, flags, args...) \
alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
 
int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
 
140,5 → 179,12
(_work)->work.func = _func; \
} while (0)
 
 
struct completion {
unsigned int done;
wait_queue_head_t wait;
};
 
 
#endif
 
/drivers/include/syscall.h
39,6 → 39,7
addr_t STDCALL AllocPages(count_t count)__asm__("AllocPages");
void IMPORT __attribute__((regparm(1)))
FreePage(addr_t page)__asm__("FreePage");
void STDCALL MapPage(void *vaddr, addr_t paddr, u32_t flags)__asm__("MapPage");
 
 
void* STDCALL CreateRingBuffer(size_t size, u32_t map)__asm__("CreateRingBuffer");
91,6 → 92,48
#define pciWriteLong(tag, reg, val) \
PciWrite32(PCI_BUS_FROM_TAG(tag),PCI_DFN_FROM_TAG(tag),(reg),(val))
 
static inline int pci_read_config_byte(struct pci_dev *dev, int where,
u8 *val)
{
*val = PciRead8(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_read_config_word(struct pci_dev *dev, int where,
u16 *val)
{
*val = PciRead16(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
u32 *val)
{
*val = PciRead32(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_write_config_byte(struct pci_dev *dev, int where,
u8 val)
{
PciWrite8(dev->busnr, dev->devfn, where, val);
return 1;
}
 
static inline int pci_write_config_word(struct pci_dev *dev, int where,
u16 val)
{
PciWrite16(dev->busnr, dev->devfn, where, val);
return 1;
}
 
static inline int pci_write_config_dword(struct pci_dev *dev, int where,
u32 val)
{
PciWrite32(dev->busnr, dev->devfn, where, val);
return 1;
}
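
A short sketch combining the wrappers above; unlike the Linux originals they always return 1, so the results are not checked. PCI_VENDOR_ID and PCI_DEVICE_ID come from pci_regs.h; the function name is hypothetical.

/* Sketch: dump the vendor/device pair of a probed device. */
static void pci_print_id(struct pci_dev *dev)
{
        u16 vendor, device;

        pci_read_config_word(dev, PCI_VENDOR_ID, &vendor);
        pci_read_config_word(dev, PCI_DEVICE_ID, &device);
        dbgprintf("PCI %04x:%04x\n", vendor, device);
}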
 
///////////////////////////////////////////////////////////////////////////////
 
int dbg_open(char *path);
447,4 → 490,37
 
#define rmb() asm volatile("lfence":::"memory")
 
static inline void *vzalloc(unsigned long size)
{
void *mem;
 
mem = KernelAlloc(size);
if(mem)
memset(mem, 0, size);
 
return mem;
};
 
static inline void vfree(void *addr)
{
KernelFree(addr);
}
 
static inline int power_supply_is_system_supplied(void) { return -1; }
 
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
 
static void init_rwsem(struct rw_semaphore *sem)
{
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
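
A minimal sketch, assuming a statically allocated semaphore: the count encodes readers and writers via the biases above, and a freshly initialised semaphore is simply RWSEM_UNLOCKED_VALUE with an empty wait list.

/* Sketch (hypothetical object): one writer makes the count equal to
 * RWSEM_ACTIVE_WRITE_BIAS; N readers make it N * RWSEM_ACTIVE_READ_BIAS. */
static struct rw_semaphore demo_sem;

static void demo_sem_setup(void)
{
        init_rwsem(&demo_sem);
}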
 
#endif
/drivers/video/drm/drm_crtc.c
31,16 → 31,12
*/
#include <linux/list.h>
#include <linux/slab.h>
#include "drm.h"
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_edid.h"
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
 
struct drm_prop_enum_list {
int type;
char *name;
};
 
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
char *fnname(int val) \
162,6 → 158,7
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
};
 
static struct drm_prop_enum_list drm_encoder_enum_list[] =
170,6 → 167,7
{ DRM_MODE_ENCODER_TMDS, "TMDS" },
{ DRM_MODE_ENCODER_LVDS, "LVDS" },
{ DRM_MODE_ENCODER_TVDAC, "TV" },
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
};
 
char *drm_get_encoder_name(struct drm_encoder *encoder)
228,7 → 226,7
again:
if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Ran out memory getting a mode number\n");
return -EINVAL;
return -ENOMEM;
}
 
mutex_lock(&dev->mode_config.idr_mutex);
236,6 → 234,8
mutex_unlock(&dev->mode_config.idr_mutex);
if (ret == -EAGAIN)
goto again;
else if (ret)
return ret;
 
obj->id = new_id;
obj->type = obj_type;
294,9 → 294,8
int ret;
 
ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
if (ret) {
if (ret)
return ret;
}
 
fb->dev = dev;
fb->funcs = funcs;
320,23 → 319,13
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
struct drm_mode_set set;
int ret;
 
/* remove from any CRTC */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb == fb) {
/* should turn off the crtc */
memset(&set, 0, sizeof(struct drm_mode_set));
set.crtc = crtc;
set.fb = NULL;
ret = crtc->funcs->set_config(&set);
if (ret)
DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
}
}
 
/*
* This could be moved to drm_framebuffer_remove(), but for
* debugging is nice to keep around the list of fb's that are
* no longer associated w/ a drm_file but are not unreferenced
* yet. (i915 and omapdrm have debugfs files which will show
* this.)
*/
drm_mode_object_put(dev, &fb->base);
list_del(&fb->head);
dev->mode_config.num_fb--;
343,6 → 332,10
}
EXPORT_SYMBOL(drm_framebuffer_cleanup);
 
 
 
 
 
/**
* drm_crtc_init - Initialise a new CRTC object
* @dev: DRM device
350,22 → 343,37
* @funcs: callbacks for the new CRTC
*
* LOCKING:
* Caller must hold mode config lock.
* Takes mode_config lock.
*
 * Inits a new object created as the base part of a driver crtc object.
*
* RETURNS:
* Zero on success, error code on failure.
*/
void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
const struct drm_crtc_funcs *funcs)
{
int ret;
 
crtc->dev = dev;
crtc->funcs = funcs;
crtc->invert_dimensions = false;
 
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
 
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
goto out;
 
crtc->base.properties = &crtc->properties;
 
list_add_tail(&crtc->head, &dev->mode_config.crtc_list);
dev->mode_config.num_crtc++;
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
}
EXPORT_SYMBOL(drm_crtc_init);
 
425,7 → 433,7
struct drm_display_mode *mode)
{
list_del(&mode->head);
kfree(mode);
drm_mode_destroy(connector->dev, mode);
}
EXPORT_SYMBOL(drm_mode_remove);
 
437,21 → 445,30
* @name: user visible name of the connector
*
* LOCKING:
* Caller must hold @dev's mode_config lock.
* Takes mode config lock.
*
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
*
* RETURNS:
* Zero on success, error code on failure.
*/
void drm_connector_init(struct drm_device *dev,
int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type)
{
int ret;
 
mutex_lock(&dev->mode_config.mutex);
 
ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
if (ret)
goto out;
 
connector->base.properties = &connector->properties;
connector->dev = dev;
connector->funcs = funcs;
drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
connector->connector_type = connector_type;
connector->connector_type_id =
++drm_connector_enum_list[connector_type].count; /* TODO */
463,13 → 480,18
list_add_tail(&connector->head, &dev->mode_config.connector_list);
dev->mode_config.num_connector++;
 
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
drm_connector_attach_property(connector,
dev->mode_config.edid_property, 0);
dev->mode_config.edid_property,
0);
 
drm_connector_attach_property(connector,
dev->mode_config.dpms_property, 0);
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
}
EXPORT_SYMBOL(drm_connector_init);
 
478,7 → 500,7
* @connector: connector to cleanup
*
* LOCKING:
* Caller must hold @dev's mode_config lock.
* Takes mode config lock.
*
* Cleans up the connector but doesn't free the object.
*/
504,16 → 526,31
}
EXPORT_SYMBOL(drm_connector_cleanup);
 
void drm_encoder_init(struct drm_device *dev,
void drm_connector_unplug_all(struct drm_device *dev)
{
struct drm_connector *connector;
 
/* taking the mode config mutex ends up in a clash with sysfs */
// list_for_each_entry(connector, &dev->mode_config.connector_list, head)
// drm_sysfs_connector_remove(connector);
 
}
EXPORT_SYMBOL(drm_connector_unplug_all);
 
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
int encoder_type)
{
int ret;
 
mutex_lock(&dev->mode_config.mutex);
 
ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
if (ret)
goto out;
 
encoder->dev = dev;
 
drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
encoder->encoder_type = encoder_type;
encoder->funcs = funcs;
 
520,7 → 557,10
list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
dev->mode_config.num_encoder++;
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
}
EXPORT_SYMBOL(drm_encoder_init);
 
535,6 → 575,70
}
EXPORT_SYMBOL(drm_encoder_cleanup);
 
int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, uint32_t format_count,
bool priv)
{
int ret;
 
mutex_lock(&dev->mode_config.mutex);
 
ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
goto out;
 
plane->base.properties = &plane->properties;
plane->dev = dev;
plane->funcs = funcs;
plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
GFP_KERNEL);
if (!plane->format_types) {
DRM_DEBUG_KMS("out of memory when allocating plane\n");
drm_mode_object_put(dev, &plane->base);
ret = -ENOMEM;
goto out;
}
 
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
plane->format_count = format_count;
plane->possible_crtcs = possible_crtcs;
 
/* private planes are not exposed to userspace, but depending on
* display hardware, might be convenient to allow sharing programming
* for the scanout engine with the crtc implementation.
*/
if (!priv) {
list_add_tail(&plane->head, &dev->mode_config.plane_list);
dev->mode_config.num_plane++;
} else {
INIT_LIST_HEAD(&plane->head);
}
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
}
EXPORT_SYMBOL(drm_plane_init);
 
void drm_plane_cleanup(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
 
mutex_lock(&dev->mode_config.mutex);
kfree(plane->format_types);
drm_mode_object_put(dev, &plane->base);
/* if not added to a list, it must be a private plane */
if (!list_empty(&plane->head)) {
list_del(&plane->head);
dev->mode_config.num_plane--;
}
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_plane_cleanup);
 
/**
* drm_mode_create - create a new display mode
* @dev: DRM device
555,7 → 659,11
if (!nmode)
return NULL;
 
drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE);
if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
kfree(nmode);
return NULL;
}
 
return nmode;
}
EXPORT_SYMBOL(drm_mode_create);
572,6 → 680,9
*/
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
{
if (!mode)
return;
 
drm_mode_object_put(dev, &mode->base);
 
kfree(mode);
582,7 → 693,6
{
struct drm_property *edid;
struct drm_property *dpms;
int i;
 
/*
* Standard properties (apply to all connectors)
592,11 → 702,9
"EDID", 0);
dev->mode_config.edid_property = edid;
 
dpms = drm_property_create(dev, DRM_MODE_PROP_ENUM,
"DPMS", ARRAY_SIZE(drm_dpms_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dpms_enum_list); i++)
drm_property_add_enum(dpms, i, drm_dpms_enum_list[i].type,
drm_dpms_enum_list[i].name);
dpms = drm_property_create_enum(dev, 0,
"DPMS", drm_dpms_enum_list,
ARRAY_SIZE(drm_dpms_enum_list));
dev->mode_config.dpms_property = dpms;
 
return 0;
612,30 → 720,21
{
struct drm_property *dvi_i_selector;
struct drm_property *dvi_i_subconnector;
int i;
 
if (dev->mode_config.dvi_i_select_subconnector_property)
return 0;
 
dvi_i_selector =
drm_property_create(dev, DRM_MODE_PROP_ENUM,
drm_property_create_enum(dev, 0,
"select subconnector",
drm_dvi_i_select_enum_list,
ARRAY_SIZE(drm_dvi_i_select_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dvi_i_select_enum_list); i++)
drm_property_add_enum(dvi_i_selector, i,
drm_dvi_i_select_enum_list[i].type,
drm_dvi_i_select_enum_list[i].name);
dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector;
 
dvi_i_subconnector =
drm_property_create(dev, DRM_MODE_PROP_ENUM |
DRM_MODE_PROP_IMMUTABLE,
dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"subconnector",
drm_dvi_i_subconnector_enum_list,
ARRAY_SIZE(drm_dvi_i_subconnector_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dvi_i_subconnector_enum_list); i++)
drm_property_add_enum(dvi_i_subconnector, i,
drm_dvi_i_subconnector_enum_list[i].type,
drm_dvi_i_subconnector_enum_list[i].name);
dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector;
 
return 0;
666,23 → 765,17
/*
* Basic connector properties
*/
tv_selector = drm_property_create(dev, DRM_MODE_PROP_ENUM,
tv_selector = drm_property_create_enum(dev, 0,
"select subconnector",
drm_tv_select_enum_list,
ARRAY_SIZE(drm_tv_select_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_tv_select_enum_list); i++)
drm_property_add_enum(tv_selector, i,
drm_tv_select_enum_list[i].type,
drm_tv_select_enum_list[i].name);
dev->mode_config.tv_select_subconnector_property = tv_selector;
 
tv_subconnector =
drm_property_create(dev, DRM_MODE_PROP_ENUM |
DRM_MODE_PROP_IMMUTABLE, "subconnector",
drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"subconnector",
drm_tv_subconnector_enum_list,
ARRAY_SIZE(drm_tv_subconnector_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_tv_subconnector_enum_list); i++)
drm_property_add_enum(tv_subconnector, i,
drm_tv_subconnector_enum_list[i].type,
drm_tv_subconnector_enum_list[i].name);
dev->mode_config.tv_subconnector_property = tv_subconnector;
 
/*
689,28 → 782,16
* Other, TV specific properties: margins & TV modes.
*/
dev->mode_config.tv_left_margin_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"left margin", 2);
dev->mode_config.tv_left_margin_property->values[0] = 0;
dev->mode_config.tv_left_margin_property->values[1] = 100;
drm_property_create_range(dev, 0, "left margin", 0, 100);
 
dev->mode_config.tv_right_margin_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"right margin", 2);
dev->mode_config.tv_right_margin_property->values[0] = 0;
dev->mode_config.tv_right_margin_property->values[1] = 100;
drm_property_create_range(dev, 0, "right margin", 0, 100);
 
dev->mode_config.tv_top_margin_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"top margin", 2);
dev->mode_config.tv_top_margin_property->values[0] = 0;
dev->mode_config.tv_top_margin_property->values[1] = 100;
drm_property_create_range(dev, 0, "top margin", 0, 100);
 
dev->mode_config.tv_bottom_margin_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"bottom margin", 2);
dev->mode_config.tv_bottom_margin_property->values[0] = 0;
dev->mode_config.tv_bottom_margin_property->values[1] = 100;
drm_property_create_range(dev, 0, "bottom margin", 0, 100);
 
dev->mode_config.tv_mode_property =
drm_property_create(dev, DRM_MODE_PROP_ENUM,
720,40 → 801,22
i, modes[i]);
 
dev->mode_config.tv_brightness_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"brightness", 2);
dev->mode_config.tv_brightness_property->values[0] = 0;
dev->mode_config.tv_brightness_property->values[1] = 100;
drm_property_create_range(dev, 0, "brightness", 0, 100);
 
dev->mode_config.tv_contrast_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"contrast", 2);
dev->mode_config.tv_contrast_property->values[0] = 0;
dev->mode_config.tv_contrast_property->values[1] = 100;
drm_property_create_range(dev, 0, "contrast", 0, 100);
 
dev->mode_config.tv_flicker_reduction_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"flicker reduction", 2);
dev->mode_config.tv_flicker_reduction_property->values[0] = 0;
dev->mode_config.tv_flicker_reduction_property->values[1] = 100;
drm_property_create_range(dev, 0, "flicker reduction", 0, 100);
 
dev->mode_config.tv_overscan_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"overscan", 2);
dev->mode_config.tv_overscan_property->values[0] = 0;
dev->mode_config.tv_overscan_property->values[1] = 100;
drm_property_create_range(dev, 0, "overscan", 0, 100);
 
dev->mode_config.tv_saturation_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"saturation", 2);
dev->mode_config.tv_saturation_property->values[0] = 0;
dev->mode_config.tv_saturation_property->values[1] = 100;
drm_property_create_range(dev, 0, "saturation", 0, 100);
 
dev->mode_config.tv_hue_property =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"hue", 2);
dev->mode_config.tv_hue_property->values[0] = 0;
dev->mode_config.tv_hue_property->values[1] = 100;
drm_property_create_range(dev, 0, "hue", 0, 100);
 
return 0;
}
769,18 → 832,14
int drm_mode_create_scaling_mode_property(struct drm_device *dev)
{
struct drm_property *scaling_mode;
int i;
 
if (dev->mode_config.scaling_mode_property)
return 0;
 
scaling_mode =
drm_property_create(dev, DRM_MODE_PROP_ENUM, "scaling mode",
drm_property_create_enum(dev, 0, "scaling mode",
drm_scaling_mode_enum_list,
ARRAY_SIZE(drm_scaling_mode_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_scaling_mode_enum_list); i++)
drm_property_add_enum(scaling_mode, i,
drm_scaling_mode_enum_list[i].type,
drm_scaling_mode_enum_list[i].name);
 
dev->mode_config.scaling_mode_property = scaling_mode;
 
798,18 → 857,14
int drm_mode_create_dithering_property(struct drm_device *dev)
{
struct drm_property *dithering_mode;
int i;
 
if (dev->mode_config.dithering_mode_property)
return 0;
 
dithering_mode =
drm_property_create(dev, DRM_MODE_PROP_ENUM, "dithering",
drm_property_create_enum(dev, 0, "dithering",
drm_dithering_mode_enum_list,
ARRAY_SIZE(drm_dithering_mode_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dithering_mode_enum_list); i++)
drm_property_add_enum(dithering_mode, i,
drm_dithering_mode_enum_list[i].type,
drm_dithering_mode_enum_list[i].name);
dev->mode_config.dithering_mode_property = dithering_mode;
 
return 0;
826,20 → 881,15
int drm_mode_create_dirty_info_property(struct drm_device *dev)
{
struct drm_property *dirty_info;
int i;
 
if (dev->mode_config.dirty_info_property)
return 0;
 
dirty_info =
drm_property_create(dev, DRM_MODE_PROP_ENUM |
DRM_MODE_PROP_IMMUTABLE,
drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
"dirty",
drm_dirty_info_enum_list,
ARRAY_SIZE(drm_dirty_info_enum_list));
for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
drm_property_add_enum(dirty_info, i,
drm_dirty_info_enum_list[i].type,
drm_dirty_info_enum_list[i].name);
dev->mode_config.dirty_info_property = dirty_info;
 
return 0;
866,6 → 916,7
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
 
mutex_lock(&dev->mode_config.mutex);
922,6 → 973,7
 
return 0;
}
EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
 
/**
* drm_mode_config_cleanup - free up DRM mode_config info
942,6 → 994,7
struct drm_encoder *encoder, *enct;
struct drm_framebuffer *fb, *fbt;
struct drm_property *property, *pt;
struct drm_plane *plane, *plt;
 
list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
head) {
958,8 → 1011,9
drm_property_destroy(dev, property);
}
 
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
fb->funcs->destroy(fb);
list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
head) {
plane->funcs->destroy(plane);
}
 
list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
966,6 → 1020,8
crtc->funcs->destroy(crtc);
}
 
idr_remove_all(&dev->mode_config.crtc_idr);
idr_destroy(&dev->mode_config.crtc_idr);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
 
980,9 → 1036,16
* Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
* the user.
*/
void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
struct drm_display_mode *in)
static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
const struct drm_display_mode *in)
{
WARN(in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX,
"timing values too large for mode info\n");
 
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
1011,10 → 1074,16
*
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
*
* RETURNS:
* Zero on success, errno on failure.
*/
void drm_crtc_convert_umode(struct drm_display_mode *out,
struct drm_mode_modeinfo *in)
static int drm_crtc_convert_umode(struct drm_display_mode *out,
const struct drm_mode_modeinfo *in)
{
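/* The userspace struct carries u32 clock/vrefresh while drm_display_mode
 * stores them as int, hence the overflow check below. */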
if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
return -ERANGE;
 
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
1031,6 → 1100,8
out->type = in->type;
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 
return 0;
}
 
 
1231,7 → 1302,7
* @arg: arg from ioctl
*
* LOCKING:
* Caller? (FIXME)
* Takes mode config lock.
*
* Construct a CRTC configuration structure to return to the user.
*
1291,7 → 1362,7
* @arg: arg from ioctl
*
* LOCKING:
* Caller? (FIXME)
* Takes mode config lock.
*
* Construct a connector configuration structure to return to the user.
*
1336,11 → 1407,7
}
connector = obj_to_connector(obj);
 
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] != 0) {
props_count++;
}
}
props_count = connector->properties.count;
 
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
1376,7 → 1443,7
*/
if ((out_resp->count_modes >= mode_count) && mode_count) {
copied = 0;
mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
list_for_each_entry(mode, &connector->modes, head) {
drm_crtc_convert_to_umode(&u_mode, mode);
if (copy_to_user(mode_ptr + copied,
1391,17 → 1458,16
 
if ((out_resp->count_props >= props_count) && props_count) {
copied = 0;
prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] != 0) {
if (put_user(connector->property_ids[i],
prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
for (i = 0; i < connector->properties.count; i++) {
if (put_user(connector->properties.ids[i],
prop_ptr + copied)) {
ret = -EFAULT;
goto out;
}
 
if (put_user(connector->property_values[i],
if (put_user(connector->properties.values[i],
prop_values + copied)) {
ret = -EFAULT;
goto out;
1409,12 → 1475,11
copied++;
}
}
}
out_resp->count_props = props_count;
 
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
if (put_user(connector->encoder_ids[i],
1468,6 → 1533,254
}
 
/**
* drm_mode_getplane_res - enumerate all plane resources
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* LOCKING:
* Takes mode config lock.
*
* Return a plane count and a set of plane IDs.
*/
int drm_mode_getplane_res(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_plane_res *plane_resp = data;
struct drm_mode_config *config;
struct drm_plane *plane;
uint32_t __user *plane_ptr;
int copied = 0, ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
config = &dev->mode_config;
 
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
if (config->num_plane &&
(plane_resp->count_planes >= config->num_plane)) {
plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
 
list_for_each_entry(plane, &config->plane_list, head) {
if (put_user(plane->base.id, plane_ptr + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
plane_resp->count_planes = config->num_plane;
 
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
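/*
 * Userspace side of the two-pass pattern above, as a hypothetical sketch
 * (assumes <sys/ioctl.h>, <stdlib.h> and libdrm's headers for
 * DRM_IOCTL_MODE_GETPLANERESOURCES; error handling omitted):
 */
#if 0
static uint32_t *example_fetch_plane_ids(int fd, uint32_t *count)
{
	struct drm_mode_get_plane_res res = {0};
	uint32_t *ids;

	ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res); /* pass 1: count */
	ids = calloc(res.count_planes, sizeof(*ids));
	res.plane_id_ptr = (uint64_t)(unsigned long)ids;
	ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res); /* pass 2: fill IDs */
	*count = res.count_planes;
	return ids;
}
#endif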
 
/**
* drm_mode_getplane - get plane info
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* LOCKING:
* Takes mode config lock.
*
* Return plane info, including formats supported, gamma size, any
* current fb, etc.
*/
int drm_mode_getplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_get_plane *plane_resp = data;
struct drm_mode_object *obj;
struct drm_plane *plane;
uint32_t __user *format_ptr;
int ret = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, plane_resp->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
ret = -ENOENT;
goto out;
}
plane = obj_to_plane(obj);
 
if (plane->crtc)
plane_resp->crtc_id = plane->crtc->base.id;
else
plane_resp->crtc_id = 0;
 
if (plane->fb)
plane_resp->fb_id = plane->fb->base.id;
else
plane_resp->fb_id = 0;
 
plane_resp->plane_id = plane->base.id;
plane_resp->possible_crtcs = plane->possible_crtcs;
plane_resp->gamma_size = plane->gamma_size;
 
/*
* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it.
*/
if (plane->format_count &&
(plane_resp->count_format_types >= plane->format_count)) {
format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
if (copy_to_user(format_ptr,
plane->format_types,
sizeof(uint32_t) * plane->format_count)) {
ret = -EFAULT;
goto out;
}
}
plane_resp->count_format_types = plane->format_count;
 
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
 
/**
* drm_mode_setplane - set up or tear down a plane
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
*
* LOCKING:
* Takes mode config lock.
*
* Set plane info, including placement, fb, scaling, and other factors.
* Or pass a NULL fb to disable.
*/
int drm_mode_setplane(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_set_plane *plane_req = data;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
int ret = 0;
unsigned int fb_width, fb_height;
int i;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
/*
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
*/
obj = drm_mode_object_find(dev, plane_req->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
ret = -ENOENT;
goto out;
}
plane = obj_to_plane(obj);
 
/* No fb means shut it down */
if (!plane_req->fb_id) {
plane->funcs->disable_plane(plane);
plane->crtc = NULL;
plane->fb = NULL;
goto out;
}
 
obj = drm_mode_object_find(dev, plane_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown crtc ID %d\n",
plane_req->crtc_id);
ret = -ENOENT;
goto out;
}
crtc = obj_to_crtc(obj);
 
obj = drm_mode_object_find(dev, plane_req->fb_id,
DRM_MODE_OBJECT_FB);
if (!obj) {
DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
plane_req->fb_id);
ret = -ENOENT;
goto out;
}
fb = obj_to_fb(obj);
 
/* Check whether this plane supports the fb pixel format. */
for (i = 0; i < plane->format_count; i++)
if (fb->pixel_format == plane->format_types[i])
break;
if (i == plane->format_count) {
DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
ret = -EINVAL;
goto out;
}
 
fb_width = fb->width << 16;
fb_height = fb->height << 16;
 
/* Make sure source coordinates are inside the fb. */
if (plane_req->src_w > fb_width ||
plane_req->src_x > fb_width - plane_req->src_w ||
plane_req->src_h > fb_height ||
plane_req->src_y > fb_height - plane_req->src_h) {
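/*
 * src_* values are 16.16 fixed point; (frac & 0xffff) * 15625 >> 10 is
 * exactly frac * 1000000 / 65536, i.e. the fractional part expressed in
 * millionths for the %u.%06u prints below.
 */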
DRM_DEBUG_KMS("Invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
plane_req->src_w >> 16,
((plane_req->src_w & 0xffff) * 15625) >> 10,
plane_req->src_h >> 16,
((plane_req->src_h & 0xffff) * 15625) >> 10,
plane_req->src_x >> 16,
((plane_req->src_x & 0xffff) * 15625) >> 10,
plane_req->src_y >> 16,
((plane_req->src_y & 0xffff) * 15625) >> 10);
ret = -ENOSPC;
goto out;
}
 
/* Give drivers some help against integer overflows */
if (plane_req->crtc_w > INT_MAX ||
plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
plane_req->crtc_h > INT_MAX ||
plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
plane_req->crtc_w, plane_req->crtc_h,
plane_req->crtc_x, plane_req->crtc_y);
ret = -ERANGE;
goto out;
}
 
ret = plane->funcs->update_plane(plane, crtc, fb,
plane_req->crtc_x, plane_req->crtc_y,
plane_req->crtc_w, plane_req->crtc_h,
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
if (!ret) {
plane->crtc = crtc;
plane->fb = fb;
}
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
}
 
/**
* drm_mode_setcrtc - set CRTC configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
1475,7 → 1788,7
* @arg: arg from ioctl
*
* LOCKING:
* Caller? (FIXME)
* Takes mode config lock.
*
* Build a new CRTC configuration based on user request.
*
1490,18 → 1803,22
struct drm_mode_config *config = &dev->mode_config;
struct drm_mode_crtc *crtc_req = data;
struct drm_mode_object *obj;
struct drm_crtc *crtc, *crtcfb;
struct drm_crtc *crtc;
struct drm_connector **connector_set = NULL, *connector;
struct drm_framebuffer *fb = NULL;
struct drm_display_mode *mode = NULL;
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
int ret = 0;
int ret;
int i;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
/* For some reason crtc x/y offsets are signed internally. */
if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
return -ERANGE;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, crtc_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
1514,17 → 1831,16
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
if (crtc_req->mode_valid) {
int hdisplay, vdisplay;
/* If we have a mode we need a framebuffer. */
/* If we pass -1, set the mode with the currently bound fb */
if (crtc_req->fb_id == -1) {
list_for_each_entry(crtcfb,
&dev->mode_config.crtc_list, head) {
if (crtcfb == crtc) {
DRM_DEBUG_KMS("Using current fb for "
"setmode\n");
if (!crtc->fb) {
DRM_DEBUG_KMS("CRTC doesn't have current FB\n");
ret = -EINVAL;
goto out;
}
fb = crtc->fb;
}
}
} else {
obj = drm_mode_object_find(dev, crtc_req->fb_id,
DRM_MODE_OBJECT_FB);
1538,9 → 1854,37
}
 
mode = drm_mode_create(dev);
drm_crtc_convert_umode(mode, &crtc_req->mode);
if (!mode) {
ret = -ENOMEM;
goto out;
}
 
ret = drm_crtc_convert_umode(mode, &crtc_req->mode);
if (ret) {
DRM_DEBUG_KMS("Invalid mode\n");
goto out;
}
 
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
hdisplay = mode->hdisplay;
vdisplay = mode->vdisplay;
 
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
 
if (hdisplay > fb->width ||
vdisplay > fb->height ||
crtc_req->x > fb->width - hdisplay ||
crtc_req->y > fb->height - vdisplay) {
DRM_DEBUG_KMS("Invalid fb size %ux%u for CRTC viewport %ux%u+%d+%d%s.\n",
fb->width, fb->height,
hdisplay, vdisplay, crtc_req->x, crtc_req->y,
crtc->invert_dimensions ? " (inverted)" : "");
ret = -ENOSPC;
goto out;
}
}
 
if (crtc_req->count_connectors == 0 && mode) {
DRM_DEBUG_KMS("Count connectors is 0 but mode set\n");
1573,7 → 1917,7
}
 
for (i = 0; i < crtc_req->count_connectors; i++) {
set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
if (get_user(out_id, &set_connectors_ptr[i])) {
ret = -EFAULT;
goto out;
1607,6 → 1951,7
 
out:
kfree(connector_set);
drm_mode_destroy(dev, mode);
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
1622,10 → 1967,8
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
if (!req->flags) {
DRM_ERROR("no operation set\n");
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
}
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
1638,7 → 1981,6
 
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set) {
DRM_ERROR("crtc does not support cursor\n");
ret = -ENXIO;
goto out;
}
1651,7 → 1993,6
if (crtc->funcs->cursor_move) {
ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
} else {
DRM_ERROR("crtc does not support cursor\n");
ret = -EFAULT;
goto out;
}
1660,7 → 2001,43
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
#endif
/* Original addfb only supported RGB formats, so figure out which one */
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
{
uint32_t fmt;
 
switch (bpp) {
case 8:
fmt = DRM_FORMAT_RGB332;
break;
case 16:
if (depth == 15)
fmt = DRM_FORMAT_XRGB1555;
else
fmt = DRM_FORMAT_RGB565;
break;
case 24:
fmt = DRM_FORMAT_RGB888;
break;
case 32:
if (depth == 24)
fmt = DRM_FORMAT_XRGB8888;
else if (depth == 30)
fmt = DRM_FORMAT_XRGB2101010;
else
fmt = DRM_FORMAT_ARGB8888;
break;
default:
DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
fmt = DRM_FORMAT_XRGB8888;
break;
}
 
return fmt;
}
EXPORT_SYMBOL(drm_mode_legacy_fb_format);
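/*
 * Illustrative sketch of the mapping above (hypothetical helper, not
 * called anywhere): the legacy bpp/depth pairs resolve to FOURCC codes,
 * with unused high bits becoming an 'X' channel.
 */
#if 0
static void example_legacy_formats(void)
{
	uint32_t a = drm_mode_legacy_fb_format(16, 15); /* DRM_FORMAT_XRGB1555 */
	uint32_t b = drm_mode_legacy_fb_format(32, 24); /* DRM_FORMAT_XRGB8888 */
	uint32_t c = drm_mode_legacy_fb_format(32, 30); /* DRM_FORMAT_XRGB2101010 */
	(void)a; (void)b; (void)c;
}
#endif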
#if 0
/**
* drm_mode_addfb - add an FB to the graphics configuration
* @inode: inode from the ioctl
1681,31 → 2058,210
int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd *r = data;
struct drm_mode_fb_cmd *or = data;
struct drm_mode_fb_cmd2 r = {};
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret = 0;
 
/* Use new struct with format internally */
r.fb_id = or->fb_id;
r.width = or->width;
r.height = or->height;
r.pitches[0] = or->pitch;
r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
r.handles[0] = or->handle;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
if ((config->min_width > r.width) || (r.width > config->max_width))
return -EINVAL;
 
if ((config->min_height > r.height) || (r.height > config->max_height))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
/* TODO check buffer is sufficiently large */
/* TODO setup destructor callback */
 
fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
ret = PTR_ERR(fb);
goto out;
}
 
or->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
 
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
 
static int format_check(const struct drm_mode_fb_cmd2 *r)
{
uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
 
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB332:
case DRM_FORMAT_BGR233:
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_XBGR4444:
case DRM_FORMAT_RGBX4444:
case DRM_FORMAT_BGRX4444:
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_ABGR4444:
case DRM_FORMAT_RGBA4444:
case DRM_FORMAT_BGRA4444:
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_XBGR1555:
case DRM_FORMAT_RGBX5551:
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_RGBA5551:
case DRM_FORMAT_BGRA5551:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
case DRM_FORMAT_RGB888:
case DRM_FORMAT_BGR888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_RGBX1010102:
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_BGRA1010102:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_AYUV:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
return 0;
default:
return -EINVAL;
}
}
 
static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
{
int ret, hsub, vsub, num_planes, i;
 
ret = format_check(r);
if (ret) {
DRM_DEBUG_KMS("bad framebuffer format 0x%08x\n", r->pixel_format);
return ret;
}
 
hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
num_planes = drm_format_num_planes(r->pixel_format);
 
if (r->width == 0 || r->width % hsub) {
DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height);
return -EINVAL;
}
 
if (r->height == 0 || r->height % vsub) {
DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
return -EINVAL;
}
 
for (i = 0; i < num_planes; i++) {
unsigned int width = r->width / (i != 0 ? hsub : 1);
 
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
return -EINVAL;
}
 
if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) {
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
}
 
return 0;
}
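/*
 * Worked example of the checks above (illustrative): a 640x480 NV12
 * buffer has hsub = vsub = 2 and two planes, so width and height must be
 * even, pitches[0] >= 1 * 640, and pitches[1] >= 2 * (640 / 2) = 640.
 */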
 
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
* @cmd: cmd from ioctl
* @arg: arg from ioctl
*
* LOCKING:
* Takes mode config lock.
*
* Add a new FB to the specified CRTC, given a user request with format.
*
* Called by the user via ioctl.
*
* RETURNS:
* Zero on success, errno on failure.
*/
int drm_mode_addfb2(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd2 *r = data;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
if ((config->min_width > r->width) || (r->width > config->max_width)) {
DRM_ERROR("mode new framebuffer width not within limits\n");
DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
r->width, config->min_width, config->max_width);
return -EINVAL;
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
DRM_ERROR("mode new framebuffer height not within limits\n");
DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n",
r->height, config->min_height, config->max_height);
return -EINVAL;
}
 
ret = framebuffer_check(r);
if (ret)
return ret;
 
mutex_lock(&dev->mode_config.mutex);
 
/* TODO check buffer is sufficiently large */
/* TODO setup destructor callback */
 
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
DRM_ERROR("could not create framebuffer\n");
DRM_DEBUG_KMS("could not create framebuffer\n");
ret = PTR_ERR(fb);
goto out;
}
1753,7 → 2309,6
obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
/* TODO check that we really get a framebuffer back. */
if (!obj) {
DRM_ERROR("mode invalid framebuffer id\n");
ret = -EINVAL;
goto out;
}
1764,17 → 2319,12
found = 1;
 
if (!found) {
DRM_ERROR("tried to remove a fb that we didn't own\n");
ret = -EINVAL;
goto out;
}
 
/* TODO release all crtc connected to the framebuffer */
/* TODO unhook the destructor from the buffer object */
drm_framebuffer_remove(fb);
 
list_del(&fb->filp_head);
fb->funcs->destroy(fb);
 
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
1788,7 → 2338,7
* @arg: arg from ioctl
*
* LOCKING:
* Caller? (FIXME)
* Takes mode config lock.
*
* Lookup the FB given its ID and return info about it.
*
1811,7 → 2361,6
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
DRM_ERROR("invalid framebuffer id\n");
ret = -EINVAL;
goto out;
}
1821,7 → 2370,7
r->width = fb->width;
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitch;
r->pitch = fb->pitches[0];
fb->funcs->create_handle(fb, file_priv, &r->handle);
 
out:
1839,7 → 2388,7
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
int ret = 0;
int ret;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
1847,7 → 2396,6
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
DRM_ERROR("invalid framebuffer id\n");
ret = -EINVAL;
goto out_err1;
}
1854,7 → 2402,7
fb = obj_to_fb(obj);
 
num_clips = r->num_clips;
clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
 
if (!num_clips != !clips_ptr) {
ret = -EINVAL;
1870,6 → 2418,10
}
 
if (num_clips && clips_ptr) {
if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
ret = -EINVAL;
goto out_err1;
}
clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
if (!clips) {
ret = -ENOMEM;
1921,8 → 2473,7
 
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
list_del(&fb->filp_head);
fb->funcs->destroy(fb);
drm_framebuffer_remove(fb);
}
mutex_unlock(&dev->mode_config.mutex);
}
1936,39 → 2487,49
*
* Add @mode to @connector's user mode list.
*/
static int drm_mode_attachmode(struct drm_device *dev,
static void drm_mode_attachmode(struct drm_device *dev,
struct drm_connector *connector,
struct drm_display_mode *mode)
{
int ret = 0;
 
list_add_tail(&mode->head, &connector->user_modes);
return ret;
}
 
int drm_mode_attachmode_crtc(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
struct drm_connector *connector;
int ret = 0;
struct drm_display_mode *dup_mode;
int need_dup = 0;
struct drm_display_mode *dup_mode, *next;
LIST_HEAD(list);
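	/* Duplicate every mode into a private list first; it is attached
	 * only when complete, so an allocation failure cannot leave the
	 * connectors with a partial set of modes. */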
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
break;
continue;
if (connector->encoder->crtc == crtc) {
if (need_dup)
dup_mode = drm_mode_duplicate(dev, mode);
else
dup_mode = mode;
ret = drm_mode_attachmode(dev, connector, dup_mode);
if (ret)
return ret;
need_dup = 1;
if (!dup_mode) {
ret = -ENOMEM;
goto out;
}
list_add_tail(&dup_mode->head, &list);
}
return 0;
}
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder)
continue;
if (connector->encoder->crtc == crtc)
list_move_tail(list.next, &connector->user_modes);
}
 
WARN_ON(!list_empty(&list));
 
out:
list_for_each_entry_safe(dup_mode, next, &list, head)
drm_mode_destroy(dev, dup_mode);
 
return ret;
}
EXPORT_SYMBOL(drm_mode_attachmode_crtc);
 
static int drm_mode_detachmode(struct drm_device *dev,
2028,7 → 2589,7
struct drm_display_mode *mode;
struct drm_mode_object *obj;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
int ret;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
2048,9 → 2609,14
goto out;
}
 
drm_crtc_convert_umode(mode, umode);
ret = drm_crtc_convert_umode(mode, umode);
if (ret) {
DRM_DEBUG_KMS("Invalid mode\n");
drm_mode_destroy(dev, mode);
goto out;
}
 
ret = drm_mode_attachmode(dev, connector, mode);
drm_mode_attachmode(dev, connector, mode);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
2077,7 → 2643,7
struct drm_connector *connector;
struct drm_display_mode mode;
struct drm_mode_modeinfo *umode = &mode_cmd->mode;
int ret = 0;
int ret;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
2091,7 → 2657,12
}
connector = obj_to_connector(obj);
 
drm_crtc_convert_umode(&mode, umode);
ret = drm_crtc_convert_umode(&mode, umode);
if (ret) {
DRM_DEBUG_KMS("Invalid mode\n");
goto out;
}
 
ret = drm_mode_detachmode(dev, connector, &mode);
out:
mutex_unlock(&dev->mode_config.mutex);
2103,6 → 2674,7
const char *name, int num_values)
{
struct drm_property *property = NULL;
int ret;
 
property = kzalloc(sizeof(struct drm_property), GFP_KERNEL);
if (!property)
2114,30 → 2686,118
goto fail;
}
 
drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
if (ret)
goto fail;
 
property->flags = flags;
property->num_values = num_values;
INIT_LIST_HEAD(&property->enum_blob_list);
 
if (name)
if (name) {
strncpy(property->name, name, DRM_PROP_NAME_LEN);
property->name[DRM_PROP_NAME_LEN-1] = '\0';
}
 
list_add_tail(&property->head, &dev->mode_config.property_list);
return property;
fail:
kfree(property->values);
kfree(property);
return NULL;
}
EXPORT_SYMBOL(drm_property_create);
 
struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags,
const char *name,
const struct drm_prop_enum_list *props,
int num_values)
{
struct drm_property *property;
int i, ret;
 
flags |= DRM_MODE_PROP_ENUM;
 
property = drm_property_create(dev, flags, name, num_values);
if (!property)
return NULL;
 
for (i = 0; i < num_values; i++) {
ret = drm_property_add_enum(property, i,
props[i].type,
props[i].name);
if (ret) {
drm_property_destroy(dev, property);
return NULL;
}
}
 
return property;
}
EXPORT_SYMBOL(drm_property_create_enum);
 
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
int flags, const char *name,
const struct drm_prop_enum_list *props,
int num_values)
{
struct drm_property *property;
int i, ret;
 
flags |= DRM_MODE_PROP_BITMASK;
 
property = drm_property_create(dev, flags, name, num_values);
if (!property)
return NULL;
 
for (i = 0; i < num_values; i++) {
ret = drm_property_add_enum(property, i,
props[i].type,
props[i].name);
if (ret) {
drm_property_destroy(dev, property);
return NULL;
}
}
 
return property;
}
EXPORT_SYMBOL(drm_property_create_bitmask);
 
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
const char *name,
uint64_t min, uint64_t max)
{
struct drm_property *property;
 
flags |= DRM_MODE_PROP_RANGE;
 
property = drm_property_create(dev, flags, name, 2);
if (!property)
return NULL;
 
property->values[0] = min;
property->values[1] = max;
 
return property;
}
EXPORT_SYMBOL(drm_property_create_range);
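/*
 * Illustrative sketch (hypothetical names): the helpers above collapse
 * the old drm_property_create() + drm_property_add_enum() sequence into
 * a single call, as the earlier hunks do for the DVI-I and TV properties.
 */
#if 0
static const struct drm_prop_enum_list example_enum_list[] = {
	{ 0, "off" },
	{ 1, "on" },
};

static void example_create_props(struct drm_device *dev)
{
	drm_property_create_enum(dev, 0, "example mode",
				 example_enum_list,
				 ARRAY_SIZE(example_enum_list));
	drm_property_create_range(dev, 0, "example level", 0, 100);
}
#endif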
 
int drm_property_add_enum(struct drm_property *property, int index,
uint64_t value, const char *name)
{
struct drm_property_enum *prop_enum;
 
if (!(property->flags & DRM_MODE_PROP_ENUM))
if (!(property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)))
return -EINVAL;
 
/*
* Bitmask enum properties have the additional constraint of values
* from 0 to 63, since each value is used as a bit position in the
* 64-bit mask exposed to userspace.
*/
if ((property->flags & DRM_MODE_PROP_BITMASK) && (value > 63))
return -EINVAL;
 
if (!list_empty(&property->enum_blob_list)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
if (prop_enum->value == value) {
2179,60 → 2839,78
}
EXPORT_SYMBOL(drm_property_destroy);
 
int drm_connector_attach_property(struct drm_connector *connector,
void drm_connector_attach_property(struct drm_connector *connector,
struct drm_property *property, uint64_t init_val)
{
int i;
drm_object_attach_property(&connector->base, property, init_val);
}
EXPORT_SYMBOL(drm_connector_attach_property);
 
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == 0) {
connector->property_ids[i] = property->base.id;
connector->property_values[i] = init_val;
break;
int drm_connector_property_set_value(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
return drm_object_property_set_value(&connector->base, property, value);
}
EXPORT_SYMBOL(drm_connector_property_set_value);
 
int drm_connector_property_get_value(struct drm_connector *connector,
struct drm_property *property, uint64_t *val)
{
return drm_object_property_get_value(&connector->base, property, val);
}
EXPORT_SYMBOL(drm_connector_property_get_value);
 
if (i == DRM_CONNECTOR_MAX_PROPERTY)
return -EINVAL;
return 0;
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t init_val)
{
int count = obj->properties->count;
 
if (count == DRM_OBJECT_MAX_PROPERTY) {
WARN(1, "Failed to attach object property (type: 0x%x). Please "
"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
"you see this message on the same object type.\n",
obj->type);
return;
}
EXPORT_SYMBOL(drm_connector_attach_property);
 
int drm_connector_property_set_value(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
obj->properties->ids[count] = property->base.id;
obj->properties->values[count] = init_val;
obj->properties->count++;
}
EXPORT_SYMBOL(drm_object_attach_property);
 
int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t val)
{
int i;
 
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == property->base.id) {
connector->property_values[i] = value;
break;
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->ids[i] == property->base.id) {
obj->properties->values[i] = val;
return 0;
}
}
 
if (i == DRM_CONNECTOR_MAX_PROPERTY)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(drm_connector_property_set_value);
EXPORT_SYMBOL(drm_object_property_set_value);
 
int drm_connector_property_get_value(struct drm_connector *connector,
int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
int i;
 
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == property->base.id) {
*val = connector->property_values[i];
break;
for (i = 0; i < obj->properties->count; i++) {
if (obj->properties->ids[i] == property->base.id) {
*val = obj->properties->values[i];
return 0;
}
}
 
if (i == DRM_CONNECTOR_MAX_PROPERTY)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(drm_connector_property_get_value);
EXPORT_SYMBOL(drm_object_property_get_value);
 
#if 0
int drm_mode_getproperty_ioctl(struct drm_device *dev,
2249,7 → 2927,7
struct drm_property_enum *prop_enum;
struct drm_mode_property_enum __user *enum_ptr;
struct drm_property_blob *prop_blob;
uint32_t *blob_id_ptr;
uint32_t __user *blob_id_ptr;
uint64_t __user *values_ptr;
uint32_t __user *blob_length_ptr;
 
2264,7 → 2942,7
}
property = obj_to_property(obj);
 
if (property->flags & DRM_MODE_PROP_ENUM) {
if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_blob_list, head)
enum_count++;
} else if (property->flags & DRM_MODE_PROP_BLOB) {
2279,7 → 2957,7
out_resp->flags = property->flags;
 
if ((out_resp->count_values >= value_count) && value_count) {
values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
for (i = 0; i < value_count; i++) {
if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
ret = -EFAULT;
2289,10 → 2967,10
}
out_resp->count_values = value_count;
 
if (property->flags & DRM_MODE_PROP_ENUM) {
if (property->flags & (DRM_MODE_PROP_ENUM | DRM_MODE_PROP_BITMASK)) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
 
if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
2314,8 → 2992,8
if (property->flags & DRM_MODE_PROP_BLOB) {
if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
copied = 0;
blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
 
list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
2343,6 → 3021,7
void *data)
{
struct drm_property_blob *blob;
int ret;
 
if (!length || !data)
return NULL;
2351,13 → 3030,16
if (!blob)
return NULL;
 
blob->data = (void *)((char *)blob + sizeof(struct drm_property_blob));
ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
if (ret) {
kfree(blob);
return NULL;
}
 
blob->length = length;
 
memcpy(blob->data, data, length);
 
drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
 
list_add_tail(&blob->head, &dev->mode_config.property_blob_list);
return blob;
}
2378,7 → 3060,7
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
void *blob_ptr;
void __user *blob_ptr;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
2392,7 → 3074,7
blob = obj_to_blob(obj);
 
if (out_resp->length == blob->length) {
blob_ptr = (void *)(unsigned long)out_resp->data;
blob_ptr = (void __user *)(unsigned long)out_resp->data;
if (copy_to_user(blob_ptr, blob->data, blob->length)){
ret = -EFAULT;
goto done;
2410,7 → 3092,7
struct edid *edid)
{
struct drm_device *dev = connector->dev;
int ret = 0, size;
int ret, size;
 
if (connector->edid_blob_ptr)
drm_property_destroy_blob(dev, connector->edid_blob_ptr);
2435,15 → 3117,69
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
 
#if 0
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
 
static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
struct drm_mode_connector_set_property *out_resp = data;
int ret = -EINVAL;
struct drm_connector *connector = obj_to_connector(obj);
 
/* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) {
if (connector->funcs->dpms)
(*connector->funcs->dpms)(connector, (int)value);
ret = 0;
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, value);
 
/* store the property value if successful */
if (!ret)
drm_connector_property_set_value(connector, property, value);
return ret;
}
 
static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
int ret = -EINVAL;
struct drm_crtc *crtc = obj_to_crtc(obj);
 
if (crtc->funcs->set_property)
ret = crtc->funcs->set_property(crtc, property, value);
if (!ret)
drm_object_property_set_value(obj, property, value);
 
return ret;
}
 
static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
{
int ret = -EINVAL;
struct drm_plane *plane = obj_to_plane(obj);
 
if (plane->funcs->set_property)
ret = plane->funcs->set_property(plane, property, value);
if (!ret)
drm_object_property_set_value(obj, property, value);
 
return ret;
}
 
int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_obj_get_properties *arg = data;
struct drm_mode_object *obj;
struct drm_property *property;
struct drm_connector *connector;
int ret = -EINVAL;
int ret = 0;
int i;
int copied = 0;
int props_count = 0;
uint32_t __user *props_ptr;
uint64_t __user *prop_values_ptr;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
2450,60 → 3186,95
 
mutex_lock(&dev->mode_config.mutex);
 
obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR);
obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
ret = -EINVAL;
goto out;
}
connector = obj_to_connector(obj);
 
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] == out_resp->prop_id)
break;
if (!obj->properties) {
ret = -EINVAL;
goto out;
}
 
if (i == DRM_CONNECTOR_MAX_PROPERTY) {
props_count = obj->properties->count;
 
/* This ioctl is called twice, once to determine how much space is
* needed, and the 2nd time to fill it. */
if ((arg->count_props >= props_count) && props_count) {
copied = 0;
props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
prop_values_ptr = (uint64_t __user *)(unsigned long)
(arg->prop_values_ptr);
for (i = 0; i < props_count; i++) {
if (put_user(obj->properties->ids[i],
props_ptr + copied)) {
ret = -EFAULT;
goto out;
}
 
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
if (put_user(obj->properties->values[i],
prop_values_ptr + copied)) {
ret = -EFAULT;
goto out;
}
property = obj_to_property(obj);
copied++;
}
}
arg->count_props = props_count;
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
 
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_mode_obj_set_property *arg = data;
struct drm_mode_object *arg_obj;
struct drm_mode_object *prop_obj;
struct drm_property *property;
int ret = -EINVAL;
int i;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!arg_obj)
goto out;
if (!arg_obj->properties)
goto out;
 
if (property->flags & DRM_MODE_PROP_RANGE) {
if (out_resp->value < property->values[0])
for (i = 0; i < arg_obj->properties->count; i++)
if (arg_obj->properties->ids[i] == arg->prop_id)
break;
 
if (i == arg_obj->properties->count)
goto out;
 
if (out_resp->value > property->values[1])
prop_obj = drm_mode_object_find(dev, arg->prop_id,
DRM_MODE_OBJECT_PROPERTY);
if (!prop_obj)
goto out;
} else {
int found = 0;
for (i = 0; i < property->num_values; i++) {
if (property->values[i] == out_resp->value) {
found = 1;
property = obj_to_property(prop_obj);
 
if (!drm_property_change_is_valid(property, arg->value))
goto out;
 
switch (arg_obj->type) {
case DRM_MODE_OBJECT_CONNECTOR:
ret = drm_mode_connector_set_obj_prop(arg_obj, property,
arg->value);
break;
case DRM_MODE_OBJECT_CRTC:
ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
break;
case DRM_MODE_OBJECT_PLANE:
ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
break;
}
}
if (!found) {
goto out;
}
}
 
/* Do DPMS ourselves */
if (property == connector->dev->mode_config.dpms_property) {
if (connector->funcs->dpms)
(*connector->funcs->dpms)(connector, (int) out_resp->value);
ret = 0;
} else if (connector->funcs->set_property)
ret = connector->funcs->set_property(connector, property, out_resp->value);
 
/* store the property value if successful */
if (!ret)
drm_connector_property_set_value(connector, property, out_resp->value);
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
2540,7 → 3311,7
}
EXPORT_SYMBOL(drm_mode_connector_detach_encoder);
 
bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size)
{
crtc->gamma_size = gamma_size;
2548,10 → 3319,10
crtc->gamma_store = kzalloc(gamma_size * sizeof(uint16_t) * 3, GFP_KERNEL);
if (!crtc->gamma_store) {
crtc->gamma_size = 0;
return false;
return -ENOMEM;
}
 
return true;
return 0;
}
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
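/*
 * The helper above now reports 0 or -ENOMEM instead of a bool, so a
 * caller checks it along the lines of (sketch):
 * if (drm_mode_crtc_set_gamma_size(crtc, 256)) goto fail;
 */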
 
2577,6 → 3348,11
}
crtc = obj_to_crtc(obj);
 
if (crtc->funcs->gamma_set == NULL) {
ret = -ENOSYS;
goto out;
}
 
/* memcpy into gamma store */
if (crtc_lut->gamma_size != crtc->gamma_size) {
ret = -EINVAL;
2682,3 → 3458,211
connector->funcs->reset(connector);
}
EXPORT_SYMBOL(drm_mode_config_reset);
/*
* Just need to support RGB formats here for compat with code that doesn't
* use pixel formats directly yet.
*/
void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp)
{
switch (format) {
case DRM_FORMAT_RGB332:
case DRM_FORMAT_BGR233:
*depth = 8;
*bpp = 8;
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_XBGR1555:
case DRM_FORMAT_RGBX5551:
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_RGBA5551:
case DRM_FORMAT_BGRA5551:
*depth = 15;
*bpp = 16;
break;
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
*depth = 16;
*bpp = 16;
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_BGR888:
*depth = 24;
*bpp = 24;
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_BGRX8888:
*depth = 24;
*bpp = 32;
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_RGBX1010102:
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_BGRA1010102:
*depth = 30;
*bpp = 32;
break;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
*depth = 32;
*bpp = 32;
break;
default:
DRM_DEBUG_KMS("unsupported pixel format\n");
*depth = 0;
*bpp = 0;
break;
}
}
EXPORT_SYMBOL(drm_fb_get_bpp_depth);
 
/**
* drm_format_num_planes - get the number of planes for format
* @format: pixel format (DRM_FORMAT_*)
*
* RETURNS:
* The number of planes used by the specified pixel format.
*/
int drm_format_num_planes(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
return 3;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
return 2;
default:
return 1;
}
}
EXPORT_SYMBOL(drm_format_num_planes);
 
/**
* drm_format_plane_cpp - determine the bytes per pixel value
* @format: pixel format (DRM_FORMAT_*)
* @plane: plane index
*
* RETURNS:
* The bytes per pixel value for the specified plane.
*/
int drm_format_plane_cpp(uint32_t format, int plane)
{
unsigned int depth;
int bpp;
 
if (plane >= drm_format_num_planes(format))
return 0;
 
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
return 2;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
return plane ? 2 : 1;
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
return 1;
default:
drm_fb_get_bpp_depth(format, &depth, &bpp);
return bpp >> 3;
}
}
EXPORT_SYMBOL(drm_format_plane_cpp);
 
/**
* drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
* @format: pixel format (DRM_FORMAT_*)
*
* RETURNS:
* The horizontal chroma subsampling factor for the
* specified pixel format.
*/
int drm_format_horz_chroma_subsampling(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
return 4;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
return 2;
default:
return 1;
}
}
EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
 
/**
* drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
* @format: pixel format (DRM_FORMAT_*)
*
* RETURNS:
* The vertical chroma subsampling factor for the
* specified pixel format.
*/
int drm_format_vert_chroma_subsampling(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
return 4;
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
return 2;
default:
return 1;
}
}
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
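/*
 * Putting the three format helpers together (illustrative): the minimum
 * pitch of plane i is
 *   drm_format_plane_cpp(fmt, i) * width / (i ? drm_format_horz_chroma_subsampling(fmt) : 1)
 * so both planes of a 640-wide NV12 buffer need at least 640 bytes per line.
 */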
/drivers/video/drm/drm_crtc_helper.c
29,22 → 29,27
* Jesse Barnes <jesse.barnes@intel.com>
*/
 
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include <linux/export.h>
#include <linux/moduleparam.h>
 
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
 
static bool drm_kms_helper_poll = true;
 
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
{
struct drm_display_mode *mode, *t;
struct drm_display_mode *mode;
 
if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE))
return;
 
list_for_each_entry_safe(mode, t, &connector->modes, head) {
list_for_each_entry(mode, &connector->modes, head) {
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
!(flags & DRM_MODE_FLAG_INTERLACE))
mode->status = MODE_NO_INTERLACE;
82,7 → 87,7
uint32_t maxX, uint32_t maxY)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *t;
struct drm_display_mode *mode;
struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
int count = 0;
91,7 → 96,7
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
drm_get_connector_name(connector));
/* set all modes to the unverified state */
list_for_each_entry_safe(mode, t, &connector->modes, head)
list_for_each_entry(mode, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
 
if (connector->force) {
113,7 → 118,12
goto prune;
}
 
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
count = drm_load_edid_firmware(connector);
if (count == 0)
#endif
count = (*connector_funcs->get_modes)(connector);
 
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
if (count == 0)
131,7 → 141,7
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
drm_mode_validate_flag(connector, mode_flags);
 
list_for_each_entry_safe(mode, t, &connector->modes, head) {
list_for_each_entry(mode, &connector->modes, head) {
if (mode->status == MODE_OK)
mode->status = connector_funcs->mode_valid(connector,
mode);
147,7 → 157,7
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
drm_get_connector_name(connector));
list_for_each_entry_safe(mode, t, &connector->modes, head) {
list_for_each_entry(mode, &connector->modes, head) {
mode->vrefresh = drm_mode_vrefresh(mode);
 
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
258,6 → 268,7
crtc->fb = NULL;
}
}
 
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 
342,12 → 353,20
struct drm_encoder *encoder;
bool ret = true;
 
ENTER();
 
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled)
return true;
 
printf("crtc->enabled\n");
 
adjusted_mode = drm_mode_duplicate(dev, mode);
if (!adjusted_mode)
return false;
 
printf("adjusted_mode\n");
 
saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
saved_x = crtc->x;
371,11 → 390,16
encoder_funcs = encoder->helper_private;
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
DRM_DEBUG_KMS("Encoder fixup failed\n");
goto done;
}
}
 
printf("list_for_each_entry\n");
printf("mode_fixup %x\n", crtc_funcs->mode_fixup);
 
if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
goto done;
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
445,11 → 469,37
crtc->y = saved_y;
}
 
LEAVE();
 
return ret;
}
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
 
 
static int
drm_crtc_helper_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_encoder *encoder;
 
/* Decouple all encoders and their attached connectors from this crtc */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder != encoder)
continue;
 
connector->encoder = NULL;
}
}
 
drm_helper_disable_unused_functions(dev);
return 0;
}
 
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @crtc: CRTC to setup
478,7 → 528,8
struct drm_connector *save_connectors, *connector;
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
int ret = 0;
struct drm_mode_set save_set;
int ret;
int i;
 
DRM_DEBUG_KMS("\n");
503,8 → 554,7
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
set->mode = NULL;
set->num_connectors = 0;
return drm_crtc_helper_disable(set->crtc);
}
 
dev = set->crtc->dev;
550,6 → 600,12
save_connectors[count++] = *connector;
}
 
save_set.crtc = set->crtc;
save_set.mode = &set->crtc->mode;
save_set.x = set->crtc->x;
save_set.y = set->crtc->y;
save_set.fb = set->crtc->fb;
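	/* Snapshot taken above so the error path can retry
	 * drm_crtc_helper_set_mode() with the original configuration if
	 * the new one fails. */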
 
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
if (set->crtc->fb != set->fb) {
674,7 → 730,7
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
drm_get_connector_name(set->connectors[i]));
set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
drm_helper_disable_unused_functions(dev);
715,6 → 771,12
*connector = save_connectors[count++];
}
 
/* Try to restore the config */
if (mode_changed &&
!drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
save_set.y, save_set.fb))
DRM_ERROR("failed to restore config after modeset failure\n");
 
kfree(save_connectors);
kfree(save_encoders);
kfree(save_crtcs);
805,13 → 867,19
EXPORT_SYMBOL(drm_helper_connector_dpms);
 
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd)
struct drm_mode_fb_cmd2 *mode_cmd)
{
int i;
 
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
fb->pitch = mode_cmd->pitch;
fb->bits_per_pixel = mode_cmd->bpp;
fb->depth = mode_cmd->depth;
for (i = 0; i < 4; i++) {
fb->pitches[i] = mode_cmd->pitches[i];
fb->offsets[i] = mode_cmd->offsets[i];
}
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
&fb->bits_per_pixel);
fb->pixel_format = mode_cmd->pixel_format;
 
return 0;
}
913,7 → 981,7
}
 
if (repoll)
queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
}
 
void drm_kms_helper_poll_disable(struct drm_device *dev)
938,7 → 1006,7
}
 
if (poll)
queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
965,9 → 1033,8
/* kill timer and schedule immediate execution, this doesn't block */
cancel_delayed_work(&dev->mode_config.output_poll_work);
if (drm_kms_helper_poll)
queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
 
 
#endif
/drivers/video/drm/drm_dp_i2c_helper.c
27,8 → 27,8
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
#include "drm_dp_helper.h"
#include "drmP.h"
#include <drm/drm_dp_helper.h>
#include <drm/drmP.h>
 
/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
static int
/drivers/video/drm/drm_edid.c
30,8 → 30,9
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include "drmP.h"
#include "drm_edid.h"
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "drm_edid_modes.h"
 
#define version_greater(edid, maj, min) \
65,6 → 66,8
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
 
struct detailed_mode_closure {
struct drm_connector *connector;
80,10 → 83,13
#define LEVEL_CVT 3
 
static struct edid_quirk {
char *vendor;
char vendor[4];
int product_id;
u32 quirks;
} edid_quirk_list[] = {
/* ASUS VW222S */
{ "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
 
/* Acer AL1706 */
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
119,6 → 125,9
/* Samsung SyncMaster 22[5-6]BW */
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
 
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
};
 
/*** DDC fetch and block validation ***/
143,22 → 152,28
}
EXPORT_SYMBOL(drm_edid_header_is_valid);
 
static int edid_fixup __read_mostly = 6;
//module_param_named(edid_fixup, edid_fixup, int, 0400);
//MODULE_PARM_DESC(edid_fixup,
// "Minimum number of valid EDID header bytes (0-8, default 6)");
 
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
* doesn't check out, or 1 if it's valid.
*/
static bool
drm_edid_block_valid(u8 *raw_edid)
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
int i;
u8 csum = 0;
struct edid *edid = (struct edid *)raw_edid;
 
if (raw_edid[0] == 0x00) {
if (edid_fixup > 8 || edid_fixup < 0)
edid_fixup = 6;
 
if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
else if (score >= 6) {
else if (score >= edid_fixup) {
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
memcpy(raw_edid, edid_header, sizeof(edid_header));
} else {
201,6 → 216,7
}
return 0;
}
EXPORT_SYMBOL(drm_edid_block_valid);
 
/**
* drm_edid_is_valid - sanity check EDID data
217,7 → 233,7
return false;
 
for (i = 0; i <= edid->extensions; i++)
if (!drm_edid_block_valid(raw + i * EDID_LENGTH))
if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
return false;
 
return true;
224,7 → 240,6
}
EXPORT_SYMBOL(drm_edid_is_valid);
 
#define DDC_ADDR 0x50
#define DDC_SEGMENT_ADDR 0x30
/**
* Get EDID information via I2C.
241,6 → 256,8
int block, int len)
{
unsigned char start = block * EDID_LENGTH;
unsigned char segment = block >> 1;
unsigned char xfers = segment ? 3 : 2;
int ret, retries = 5;
 
/* The core i2c driver will automatically retry the transfer if the
252,6 → 269,11
do {
struct i2c_msg msgs[] = {
{
.addr = DDC_SEGMENT_ADDR,
.flags = 0,
.len = 1,
.buf = &segment,
}, {
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
263,12 → 285,23
.buf = buf,
}
};
ret = i2c_transfer(adapter, msgs, 2);
} while (ret != 2 && --retries);
 
return ret == 2 ? 0 : -1;
/*
* Avoid sending the segment address so as not to upset non-compliant
* DDC monitors.
*/
ret = i2c_transfer(adapter, &msgs[3 - xfers], xfers);
 
if (ret == -ENXIO) {
DRM_DEBUG_KMS("drm: skipping non-existent adapter %s\n",
adapter->name);
break;
}
} while (ret != xfers && --retries);
 
return ret == xfers ? 0 : -1;
}
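In other words, the function now implements E-DDC segment addressing: blocks 0 and 1 live in segment 0 and are fetched with the classic two-message transfer, while blocks 2 and 3 live in segment 1 and need a leading write of the segment pointer to address 0x30. A worked trace of the values above when reading block 2 (EDID_LENGTH is 128):

    /*
     * block = 2  ->  segment = 2 >> 1 = 1
     *                start = (unsigned char)(2 * EDID_LENGTH) = 0
     *                xfers = 3
     *
     *   msgs[0]: write 0x01 to 0x30  (segment pointer)
     *   msgs[1]: write 0x00 to 0x50  (offset of block 2 inside segment 1)
     *   msgs[2]: read 128 bytes from 0x50
     *
     * For blocks 0 and 1, xfers = 2 and msgs[0] is skipped entirely,
     * which is what keeps older, non-compliant monitors happy.
     */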
 
static bool drm_edid_is_zero(u8 *in_edid, int length)
{
int i;
283,9 → 316,10
static u8 *
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
size_t alloc_size;
int i, j = 0, valid_extensions = 0;
u8 *block, *new;
size_t alloc_size;
bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
 
if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
294,7 → 328,7
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block))
if (drm_edid_block_valid(block, 0, print_bad_edid))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
326,7 → 360,7
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
valid_extensions++;
break;
}
351,8 → 385,11
return block;
 
carp:
if (print_bad_edid) {
dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
drm_get_connector_name(connector), j);
}
connector->bad_edid_counter++;
 
out:
kfree(block);
365,7 → 402,7
* \param adapter : i2c device adapter
* \return 1 on success
*/
static bool
bool
drm_probe_ddc(struct i2c_adapter *adapter)
{
unsigned char out;
372,6 → 409,7
 
return (drm_do_probe_ddc_edid(adapter, &out, 0, 1) == 0);
}
EXPORT_SYMBOL(drm_probe_ddc);
 
/**
* drm_get_edid - get EDID data, if available
391,10 → 429,7
if (drm_probe_ddc(adapter))
edid = (struct edid *)drm_do_get_edid(connector, adapter);
 
connector->display_info.raw_edid = (char *)edid;
 
return edid;
 
}
EXPORT_SYMBOL(drm_get_edid);
 
490,24 → 525,48
preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
 
static bool
mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
(mode->hsync_end - mode->hsync_start == 32) &&
(mode->vsync_start - mode->vdisplay == 3);
}
 
/*
* drm_mode_find_dmt - Create a copy of a mode if present in DMT
* @dev: Device to duplicate against
* @hsize: Mode width
* @vsize: Mode height
* @fresh: Mode refresh rate
* @rb: Mode reduced-blanking-ness
*
* Walk the DMT mode list looking for a match for the given parameters.
* Return a newly allocated copy of the mode, or NULL if not found.
*/
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
int hsize, int vsize, int fresh,
bool rb)
{
struct drm_display_mode *mode = NULL;
int i;
 
for (i = 0; i < drm_num_dmt_modes; i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize == ptr->hdisplay &&
vsize == ptr->vdisplay &&
fresh == drm_mode_vrefresh(ptr)) {
/* get the expected default mode */
mode = drm_mode_duplicate(dev, ptr);
break;
if (hsize != ptr->hdisplay)
continue;
if (vsize != ptr->vdisplay)
continue;
if (fresh != drm_mode_vrefresh(ptr))
continue;
if (rb != mode_is_rb(ptr))
continue;
 
return drm_mode_duplicate(dev, ptr);
}
 
return NULL;
}
return mode;
}
EXPORT_SYMBOL(drm_mode_find_dmt);
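A short usage sketch for the new rb parameter: a caller that knows the monitor handles reduced blanking can probe for the RB entry first and fall back to the full-blanking DMT timing, the same pattern the standard-timing code below adopts (dev and edid are assumed from the caller's context):

    struct drm_display_mode *mode = NULL;

    if (drm_monitor_supports_rb(edid))
        mode = drm_mode_find_dmt(dev, 1920, 1200, 60, true);
    if (!mode)
        mode = drm_mode_find_dmt(dev, 1920, 1200, 60, false);
    /* mode is a caller-owned duplicate, or NULL if no DMT entry matches */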
 
typedef void detailed_cb(struct detailed_timing *timing, void *closure);
516,25 → 575,10
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
int i, n = 0;
u8 rev = ext[0x01], d = ext[0x02];
u8 d = ext[0x02];
u8 *det_base = ext + d;
 
switch (rev) {
case 0:
/* can't happen */
return;
case 1:
/* have to infer how many blocks we have, check pixel clock */
for (i = 0; i < 6; i++)
if (det_base[18*i] || det_base[18*i+1])
n++;
break;
default:
/* explicit count */
n = min(ext[0x03] & 0x0f, 6);
break;
}
 
n = (127 - d) / 18;
for (i = 0; i < n; i++)
cb((struct detailed_timing *)(det_base + 18 * i), closure);
}
593,7 → 637,7
drm_monitor_supports_rb(struct edid *edid)
{
if (edid->revision >= 4) {
bool ret;
bool ret = false;
drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
return ret;
}
750,10 → 794,17
}
 
/* check whether it can be found in default mode table */
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate);
if (drm_monitor_supports_rb(edid)) {
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate,
true);
if (mode)
return mode;
}
mode = drm_mode_find_dmt(dev, hsize, vsize, vrefresh_rate, false);
if (mode)
return mode;
 
/* okay, generate it */
switch (timing_level) {
case LEVEL_DMT:
break;
767,8 → 818,10
* secondary GTF curve. Please don't do that.
*/
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
if (!mode)
return NULL;
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(edid)) {
kfree(mode);
drm_mode_destroy(dev, mode);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
vrefresh_rate, 0, 0,
drm_gtf2_m(edid),
871,12 → 924,19
"Wrong Hsync/Vsync pulse width\n");
return NULL;
}
 
if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
if (!mode)
return NULL;
 
goto set_size;
}
 
mode = drm_mode_create(dev);
if (!mode)
return NULL;
 
mode->type = DRM_MODE_TYPE_DRIVER;
 
if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
timing->pixel_clock = cpu_to_le16(1088);
 
900,8 → 960,6
 
drm_mode_do_interlace_quirk(mode, pt);
 
drm_mode_set_name(mode);
 
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
911,6 → 969,7
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
 
set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
 
924,19 → 983,13
mode->height_mm = edid->height_cm * 10;
}
 
mode->type = DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
 
return mode;
}
 
static bool
mode_is_rb(const struct drm_display_mode *mode)
{
return (mode->htotal - mode->hdisplay == 160) &&
(mode->hsync_end - mode->hdisplay == 80) &&
(mode->hsync_end - mode->hsync_start == 32) &&
(mode->vsync_start - mode->vdisplay == 3);
}
 
static bool
mode_in_hsync_range(const struct drm_display_mode *mode,
struct edid *edid, u8 *t)
{
1013,12 → 1066,26
return true;
}
 
/*
* XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
* need to account for them.
*/
static bool valid_inferred_mode(const struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct drm_display_mode *m;
bool ok = false;
 
list_for_each_entry(m, &connector->probed_modes, head) {
if (mode->hdisplay == m->hdisplay &&
mode->vdisplay == m->vdisplay &&
drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
return false; /* duplicated */
if (mode->hdisplay <= m->hdisplay &&
mode->vdisplay <= m->vdisplay)
ok = true;
}
return ok;
}
 
static int
drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
{
int i, modes = 0;
1026,7 → 1093,8
struct drm_device *dev = connector->dev;
 
for (i = 0; i < drm_num_dmt_modes; i++) {
if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
1038,18 → 1106,113
return modes;
}
 
/* fix up 1366x768 mode from 1368x768;
* GTF/CVT can't express a width of 1366, which isn't divisible by 8
*/
static void fixup_mode_1366x768(struct drm_display_mode *mode)
{
if (mode->hdisplay == 1368 && mode->vdisplay == 768) {
mode->hdisplay = 1366;
mode->hsync_start--;
mode->hsync_end--;
drm_mode_set_name(mode);
}
}
 
static int
drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
 
for (i = 0; i < num_extra_modes; i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
if (!newmode)
return modes;
 
fixup_mode_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
 
drm_mode_probed_add(connector, newmode);
modes++;
}
 
return modes;
}
 
static int
drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
bool rb = drm_monitor_supports_rb(edid);
 
for (i = 0; i < num_extra_modes; i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
if (!newmode)
return modes;
 
fixup_mode_1366x768(newmode);
if (!mode_in_range(newmode, edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
 
drm_mode_probed_add(connector, newmode);
modes++;
}
 
return modes;
}
 
static void
do_inferred_modes(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
struct detailed_non_pixel *data = &timing->data.other_data;
int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
struct detailed_data_monitor_range *range = &data->data.range;
 
if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
if (data->type != EDID_DETAIL_MONITOR_RANGE)
return;
 
closure->modes += drm_dmt_modes_for_range(closure->connector,
closure->edid,
timing);
 
if (!version_greater(closure->edid, 1, 1))
return; /* GTF not defined yet */
 
switch (range->flags) {
case 0x02: /* secondary gtf, XXX could do more */
case 0x00: /* default gtf */
closure->modes += drm_gtf_modes_for_range(closure->connector,
closure->edid,
timing);
break;
case 0x04: /* cvt, only in 1.4+ */
if (!version_greater(closure->edid, 1, 3))
break;
 
closure->modes += drm_cvt_modes_for_range(closure->connector,
closure->edid,
timing);
break;
case 0x01: /* just the ranges, no formula */
default:
break;
}
}
 
static int
add_inferred_modes(struct drm_connector *connector, struct edid *edid)
1081,8 → 1244,8
mode = drm_mode_find_dmt(connector->dev,
est3_modes[m].w,
est3_modes[m].h,
est3_modes[m].r
/*, est3_modes[m].rb */);
est3_modes[m].r,
est3_modes[m].rb);
if (mode) {
drm_mode_probed_add(connector, mode);
modes++;
1327,8 → 1490,12
 
#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define EDID_BASIC_AUDIO (1 << 6)
#define EDID_CEA_YCRCB444 (1 << 5)
#define EDID_CEA_YCRCB422 (1 << 4)
 
/**
* Search EDID for CEA extension block.
1356,7 → 1523,297
}
EXPORT_SYMBOL(drm_find_cea_extension);
 
static int
do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
{
struct drm_device *dev = connector->dev;
u8 * mode, cea_mode;
int modes = 0;
 
for (mode = db; mode < db + len; mode++) {
cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
if (cea_mode < drm_num_cea_modes) {
struct drm_display_mode *newmode;
newmode = drm_mode_duplicate(dev,
&edid_cea_modes[cea_mode]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
}
}
}
 
return modes;
}
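A worked example of the Short Video Descriptor decoding above: under the CEA-861 convention, bit 7 of an SVD byte flags a native mode and the low seven bits carry the VIC, so the code masks with 127 and subtracts one to index edid_cea_modes. An SVD byte of 0x90 (an illustrative value) therefore selects VIC 16:

    u8 svd = 0x90;                 /* native bit set, VIC = 16 */
    u8 cea_mode = (svd & 127) - 1; /* = 15, the 1920x1080@60Hz entry */

    if (cea_mode < drm_num_cea_modes)
        newmode = drm_mode_duplicate(dev, &edid_cea_modes[cea_mode]);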
 
static int
cea_db_payload_len(const u8 *db)
{
return db[0] & 0x1f;
}
 
static int
cea_db_tag(const u8 *db)
{
return db[0] >> 5;
}
 
static int
cea_revision(const u8 *cea)
{
return cea[1];
}
 
static int
cea_db_offsets(const u8 *cea, int *start, int *end)
{
/* Data block offset in CEA extension block */
*start = 4;
*end = cea[2];
if (*end == 0)
*end = 127;
if (*end < 4 || *end > 127)
return -ERANGE;
return 0;
}
 
#define for_each_cea_db(cea, i, start, end) \
for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
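These helpers split a CEA data block header byte into a 3-bit tag and a 5-bit payload length, and for_each_cea_db then walks the blocks without running past the declared end of the collection. For instance, with an illustrative header byte of 0x83 (binary 100 00011):

    /*
     * cea_db_tag(db)         == 0x83 >> 5   == 4  -> SPEAKER_BLOCK
     * cea_db_payload_len(db) == 0x83 & 0x1f == 3  -> 3 payload bytes,
     * so the next data block header sits 1 + 3 bytes further on.
     */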
 
static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
u8 * cea = drm_find_cea_extension(edid);
u8 * db, dbl;
int modes = 0;
 
if (cea && cea_revision(cea) >= 3) {
int i, start, end;
 
if (cea_db_offsets(cea, &start, &end))
return 0;
 
for_each_cea_db(cea, i, start, end) {
db = &cea[i];
dbl = cea_db_payload_len(db);
 
if (cea_db_tag(db) == VIDEO_BLOCK)
modes += do_cea_modes (connector, db+1, dbl);
}
}
 
return modes;
}
 
static void
parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
{
u8 len = cea_db_payload_len(db);
 
if (len >= 6) {
connector->eld[5] |= (db[6] >> 7) << 1; /* Supports_AI */
connector->dvi_dual = db[6] & 1;
}
if (len >= 7)
connector->max_tmds_clock = db[7] * 5;
if (len >= 8) {
connector->latency_present[0] = db[8] >> 7;
connector->latency_present[1] = (db[8] >> 6) & 1;
}
if (len >= 9)
connector->video_latency[0] = db[9];
if (len >= 10)
connector->audio_latency[0] = db[10];
if (len >= 11)
connector->video_latency[1] = db[11];
if (len >= 12)
connector->audio_latency[1] = db[12];
 
DRM_LOG_KMS("HDMI: DVI dual %d, "
"max TMDS clock %d, "
"latency present %d %d, "
"video latency %d %d, "
"audio latency %d %d\n",
connector->dvi_dual,
connector->max_tmds_clock,
(int) connector->latency_present[0],
(int) connector->latency_present[1],
connector->video_latency[0],
connector->video_latency[1],
connector->audio_latency[0],
connector->audio_latency[1]);
}
 
static void
monitor_name(struct detailed_timing *t, void *data)
{
if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
*(u8 **)data = t->data.other_data.data.str.str;
}
 
static bool cea_db_is_hdmi_vsdb(const u8 *db)
{
int hdmi_id;
 
if (cea_db_tag(db) != VENDOR_BLOCK)
return false;
 
if (cea_db_payload_len(db) < 5)
return false;
 
hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
 
return hdmi_id == HDMI_IDENTIFIER;
}
 
/**
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
* @edid: EDID to parse
*
* Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
* Some ELD fields are left to the graphics driver caller:
* - Conn_Type
* - HDCP
* - Port_ID
*/
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
{
uint8_t *eld = connector->eld;
u8 *cea;
u8 *name;
u8 *db;
int sad_count = 0;
int mnl;
int dbl;
 
memset(eld, 0, sizeof(connector->eld));
 
cea = drm_find_cea_extension(edid);
if (!cea) {
DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
return;
}
 
name = NULL;
drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
for (mnl = 0; name && mnl < 13; mnl++) {
if (name[mnl] == 0x0a)
break;
eld[20 + mnl] = name[mnl];
}
eld[4] = (cea[1] << 5) | mnl;
DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
 
eld[0] = 2 << 3; /* ELD version: 2 */
 
eld[16] = edid->mfg_id[0];
eld[17] = edid->mfg_id[1];
eld[18] = edid->prod_code[0];
eld[19] = edid->prod_code[1];
 
if (cea_revision(cea) >= 3) {
int i, start, end;
 
if (cea_db_offsets(cea, &start, &end)) {
start = 0;
end = 0;
}
 
for_each_cea_db(cea, i, start, end) {
db = &cea[i];
dbl = cea_db_payload_len(db);
switch (cea_db_tag(db)) {
case AUDIO_BLOCK:
/* Audio Data Block, contains SADs */
sad_count = dbl / 3;
if (dbl >= 1)
memcpy(eld + 20 + mnl, &db[1], dbl);
break;
case SPEAKER_BLOCK:
/* Speaker Allocation Data Block */
if (dbl >= 1)
eld[7] = db[1];
break;
case VENDOR_BLOCK:
/* HDMI Vendor-Specific Data Block */
if (cea_db_is_hdmi_vsdb(db))
parse_hdmi_vsdb(connector, db);
break;
default:
break;
}
}
}
eld[5] |= sad_count << 4;
eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
 
DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
}
EXPORT_SYMBOL(drm_edid_to_eld);
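The closing size computation rounds the baseline block up to 4-byte units. Worked numbers, assuming an 8-character monitor name and a single Short Audio Descriptor:

    /*
     * mnl = 8, sad_count = 1:
     *   eld[2] = (20 + 8 + 1 * 3 + 3) / 4 = 34 / 4 = 8
     * i.e. ceil(31 / 4): the 20 bytes preceding the name, the name
     * itself, and one 3-byte SAD, rounded up to whole dwords.
     */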
 
/**
* drm_av_sync_delay - HDMI/DP sink audio-video sync delay in milliseconds
* @connector: connector associated with the HDMI/DP sink
* @mode: the display mode
*/
int drm_av_sync_delay(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
int a, v;
 
if (!connector->latency_present[0])
return 0;
if (!connector->latency_present[1])
i = 0;
 
a = connector->audio_latency[i];
v = connector->video_latency[i];
 
/*
* HDMI/DP sink doesn't support audio or video?
*/
if (a == 255 || v == 255)
return 0;
 
/*
* Convert raw EDID values to milliseconds.
* Treat unknown latency as 0 ms.
*/
if (a)
a = min(2 * (a - 1), 500);
if (v)
v = min(2 * (v - 1), 500);
 
return max(v - a, 0);
}
EXPORT_SYMBOL(drm_av_sync_delay);
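Worked numbers for the conversion above: a non-zero EDID latency byte encodes (latency_ms / 2) + 1, with 0 meaning unknown and 255 meaning unsupported. With illustrative raw values of audio = 20 and video = 40:

    /*
     * a = 20 -> 2 * (20 - 1) = 38 ms audio latency
     * v = 40 -> 2 * (40 - 1) = 78 ms video latency
     * return max(78 - 38, 0) = 40: hold the audio back by 40 ms so it
     * lines up with the slower video path (clamped at 500 ms per leg).
     */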
 
/**
* drm_select_eld - select one ELD from multiple HDMI/DP sinks
* @encoder: the encoder whose display mode was just changed
* @mode: the adjusted display mode
*
* It's possible for one encoder to be associated with multiple HDMI/DP sinks.
* The policy is currently hard-coded to simply use the first HDMI/DP sink's ELD.
*/
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_connector *connector;
struct drm_device *dev = encoder->dev;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
if (connector->encoder == encoder && connector->eld[0])
return connector;
 
return NULL;
}
EXPORT_SYMBOL(drm_select_eld);
 
/**
* drm_detect_hdmi_monitor - detect whether monitor is hdmi.
* @edid: monitor EDID information
*
1366,38 → 1823,26
bool drm_detect_hdmi_monitor(struct edid *edid)
{
u8 *edid_ext;
int i, hdmi_id;
int i;
int start_offset, end_offset;
bool is_hdmi = false;
 
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
goto end;
return false;
 
/* Data block offset in CEA extension block */
start_offset = 4;
end_offset = edid_ext[2];
if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
return false;
 
/*
* Because HDMI identifier is in Vendor Specific Block,
* search it from all data blocks of CEA extension.
*/
for (i = start_offset; i < end_offset;
/* Increased by data block len */
i += ((edid_ext[i] & 0x1f) + 1)) {
/* Find vendor specific block */
if ((edid_ext[i] >> 5) == VENDOR_BLOCK) {
hdmi_id = edid_ext[i + 1] | (edid_ext[i + 2] << 8) |
edid_ext[i + 3] << 16;
/* Find HDMI identifier */
if (hdmi_id == HDMI_IDENTIFIER)
is_hdmi = true;
break;
for_each_cea_db(edid_ext, i, start_offset, end_offset) {
if (cea_db_is_hdmi_vsdb(&edid_ext[i]))
return true;
}
}
 
end:
return is_hdmi;
return false;
}
EXPORT_SYMBOL(drm_detect_hdmi_monitor);
 
1429,15 → 1874,13
goto end;
}
 
/* Data block offset in CEA extension block */
start_offset = 4;
end_offset = edid_ext[2];
if (cea_db_offsets(edid_ext, &start_offset, &end_offset))
goto end;
 
for (i = start_offset; i < end_offset;
i += ((edid_ext[i] & 0x1f) + 1)) {
if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
for_each_cea_db(edid_ext, i, start_offset, end_offset) {
if (cea_db_tag(&edid_ext[i]) == AUDIO_BLOCK) {
has_audio = true;
for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
for (j = 1; j < cea_db_payload_len(&edid_ext[i]) + 1; j += 3)
DRM_DEBUG_KMS("CEA audio format %d\n",
(edid_ext[i + j] >> 3) & 0xf);
goto end;
1469,13 → 1912,29
info->bpc = 0;
info->color_formats = 0;
 
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
if (edid->revision < 3)
return;
 
if (!(edid->input & DRM_EDID_INPUT_DIGITAL))
return;
 
/* Get data from CEA blocks if present */
edid_ext = drm_find_cea_extension(edid);
if (edid_ext) {
info->cea_rev = edid_ext[1];
 
/* The existence of a CEA block should imply RGB support */
info->color_formats = DRM_COLOR_FORMAT_RGB444;
if (edid_ext[3] & EDID_CEA_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid_ext[3] & EDID_CEA_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
 
/* Only defined for 1.4 with digital displays */
if (edid->revision < 4)
return;
 
switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) {
case DRM_EDID_DIGITAL_DEPTH_6:
info->bpc = 6;
1501,18 → 1960,11
break;
}
 
info->color_formats = DRM_COLOR_FORMAT_RGB444;
if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats = DRM_COLOR_FORMAT_YCRCB444;
if (info->color_formats & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats = DRM_COLOR_FORMAT_YCRCB422;
 
/* Get data from CEA blocks if present */
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
return;
 
info->cea_rev = edid_ext[1];
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB444)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
}
 
/**
1559,6 → 2011,7
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
num_modes += add_inferred_modes(connector, edid);
num_modes += add_cea_modes(connector, edid);
 
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
/drivers/video/drm/drm_edid_modes.h
24,13 → 24,12
*/
 
#include <linux/kernel.h>
#include "drmP.h"
#include "drm_edid.h"
#include <drm/drmP.h>
#include <drm/drm_edid.h>
 
/*
* Autogenerated from the DMT spec.
* This table is copied from xfree86/modes/xf86EdidModes.c.
* But the mode with Reduced blank feature is deleted.
*/
static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
81,12 → 80,16
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
896, 1048, 0, 600, 601, 604, 631, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@120Hz RB */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
880, 960, 0, 600, 603, 607, 636, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 848x480@60Hz */
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
976, 1088, 0, 480, 486, 494, 517, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
1208, 1264, 0, 768, 768, 772, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
106,10 → 109,18
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@120Hz RB */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
1104, 1184, 0, 768, 771, 775, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@60Hz RB */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
1360, 1440, 0, 768, 771, 778, 790, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
122,6 → 133,14
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
1496, 1712, 0, 768, 771, 778, 809, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@120Hz RB */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
1360, 1440, 0, 768, 771, 778, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz RB */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
1360, 1440, 0, 800, 803, 809, 823, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
134,6 → 153,10
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
1496, 1712, 0, 800, 803, 809, 843, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@120Hz RB */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
1360, 1440, 0, 800, 803, 809, 847, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
142,6 → 165,10
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
1504, 1728, 0, 960, 961, 964, 1011, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x960@120Hz RB */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
1360, 1440, 0, 960, 963, 967, 1017, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
154,22 → 181,42
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@120Hz RB */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x1050@60Hz */
/* 1360x768@120Hz RB */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
1440, 1520, 0, 768, 771, 776, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1400x1050@60Hz RB */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x1050@75Hz */
/* 1400x1050@75Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x1050@85Hz */
/* 1400x1050@85Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@120Hz RB */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz RB */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
1520, 1600, 0, 900, 903, 909, 926, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
182,6 → 229,10
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
1696, 1952, 0, 900, 903, 909, 948, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@120Hz RB */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
1520, 1600, 0, 900, 903, 909, 953, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
202,6 → 253,14
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@120Hz RB */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz RB */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
214,15 → 273,23
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@120Hz RB */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1729x1344@75Hz */
/* 1792x1344@75Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1853x1392@60Hz */
/* 1792x1344@120Hz RB */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
230,6 → 297,14
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@120Hz RB */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz RB */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
242,6 → 317,10
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@120Hz RB */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
250,6 → 329,14
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@120Hz RB */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz RB */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
262,6 → 349,11
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@120Hz RB */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
 
};
static const int drm_num_dmt_modes =
sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
303,7 → 395,7
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
320,12 → 412,14
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
 
static const struct {
struct minimode {
short w;
short h;
short r;
short rb;
} est3_modes[] = {
};
 
static const struct minimode est3_modes[] = {
/* byte 6 */
{ 640, 350, 85, 0 },
{ 640, 400, 85, 0 },
377,4 → 471,304
{ 1920, 1440, 60, 0 },
{ 1920, 1440, 75, 0 },
};
static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
static const int num_est3_modes = ARRAY_SIZE(est3_modes);
 
static const struct minimode extra_modes[] = {
{ 1024, 576, 60, 0 },
{ 1366, 768, 60, 0 },
{ 1600, 900, 60, 0 },
{ 1680, 945, 60, 0 },
{ 1920, 1080, 60, 0 },
{ 2048, 1152, 60, 0 },
{ 2048, 1536, 60, 0 },
};
static const int num_extra_modes = ARRAY_SIZE(extra_modes);
 
/*
* Probably taken from CEA-861 spec.
* This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
*/
static const struct drm_display_mode edid_cea_modes[] = {
/* 1 - 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 3 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 4 - 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 5 - 1920x1080i@60Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 8 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 9 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 10 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 11 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 12 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 13 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 14 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 15 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 16 - 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 17 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 18 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 19 - 1280x720@50Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 20 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 23 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 24 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 25 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 26 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 27 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 28 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 29 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 30 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 31 - 1920x1080@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 32 - 1920x1080@24Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 33 - 1920x1080@25Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 34 - 1920x1080@30Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 35 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 36 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 37 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 38 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 39 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 40 - 1920x1080i@100Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 41 - 1280x720@100Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 42 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 43 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 47 - 1280x720@120Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 48 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 49 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 52 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 53 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 56 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 57 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 58 - 1440x480i@240Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 59 - 1440x480i@240Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 60 - 1280x720@24Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 61 - 1280x720@25Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 62 - 1280x720@30Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 63 - 1920x1080@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 64 - 1920x1080@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes);
/drivers/video/drm/drm_fb_helper.c
31,10 → 31,11
#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_fb_helper.h"
#include "drm_crtc_helper.h"
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
 
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
87,6 → 88,9
{
uint16_t *r_base, *g_base, *b_base;
 
if (crtc->funcs->gamma_set == NULL)
return;
 
r_base = crtc->gamma_store;
g_base = r_base + crtc->gamma_size;
b_base = g_base + crtc->gamma_size;
95,92 → 99,33
}
 
 
static void drm_fb_helper_on(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_connector *connector;
struct drm_encoder *encoder;
int i, j;
 
/*
* For each CRTC in this fb, turn the crtc on then,
* find all associated encoders and turn them on.
*/
mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
crtc_funcs = crtc->helper_private;
 
if (!crtc->enabled)
continue;
 
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
 
/* Walk the connectors & encoders on this fb turning them on */
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->dpms = DRM_MODE_DPMS_ON;
drm_connector_property_set_value(connector,
dev->mode_config.dpms_property,
DRM_MODE_DPMS_ON);
}
/* Found a CRTC on this fb, now find encoders */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
struct drm_encoder_helper_funcs *encoder_funcs;
 
encoder_funcs = encoder->helper_private;
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
}
}
mutex_unlock(&dev->mode_config.mutex);
}
 
static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
struct drm_crtc_helper_funcs *crtc_funcs;
struct drm_connector *connector;
struct drm_encoder *encoder;
int i, j;
 
/*
* For each CRTC in this fb, find all associated encoders
* and turn them off, then turn off the CRTC.
* For each CRTC in this fb, turn the connectors on/off.
*/
mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
crtc_funcs = crtc->helper_private;
 
if (!crtc->enabled)
continue;
 
/* Walk the connectors on this fb and mark them off */
/* Walk the connectors & encoders on this fb turning them on/off */
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
connector->dpms = dpms_mode;
connector->funcs->dpms(connector, dpms_mode);
drm_connector_property_set_value(connector,
dev->mode_config.dpms_property,
dpms_mode);
dev->mode_config.dpms_property, dpms_mode);
}
/* Found a CRTC on this fb, now find encoders */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
struct drm_encoder_helper_funcs *encoder_funcs;
 
encoder_funcs = encoder->helper_private;
encoder_funcs->dpms(encoder, dpms_mode);
}
}
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
mutex_unlock(&dev->mode_config.mutex);
}
 
189,23 → 134,23
switch (blank) {
/* Display: On; HSync: On, VSync: On */
case FB_BLANK_UNBLANK:
drm_fb_helper_on(info);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON);
break;
/* Display: Off; HSync: On, VSync: On */
case FB_BLANK_NORMAL:
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: Off, VSync: On */
case FB_BLANK_HSYNC_SUSPEND:
drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY);
break;
/* Display: Off; HSync: On, VSync: Off */
case FB_BLANK_VSYNC_SUSPEND:
drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND);
break;
/* Display: Off; HSync: Off, VSync: Off */
case FB_BLANK_POWERDOWN:
drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF);
break;
}
return 0;
219,8 → 164,11
for (i = 0; i < helper->connector_count; i++)
kfree(helper->connector_info[i]);
kfree(helper->connector_info);
for (i = 0; i < helper->crtc_count; i++)
for (i = 0; i < helper->crtc_count; i++) {
kfree(helper->crtc_info[i].mode_set.connectors);
if (helper->crtc_info[i].mode_set.mode)
drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
}
kfree(helper->crtc_info);
}
 
229,7 → 177,6
int crtc_count, int max_conn_count)
{
struct drm_crtc *crtc;
int ret = 0;
int i;
 
fb_helper->dev = dev;
254,20 → 201,17
sizeof(struct drm_connector *),
GFP_KERNEL);
 
if (!fb_helper->crtc_info[i].mode_set.connectors) {
ret = -ENOMEM;
if (!fb_helper->crtc_info[i].mode_set.connectors)
goto out_free;
}
fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
 
i = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
fb_helper->crtc_info[i].crtc_id = crtc->base.id;
fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
fb_helper->conn_limit = max_conn_count;
 
return 0;
out_free:
drm_fb_helper_crtc_free(fb_helper);
389,9 → 333,13
return -EINVAL;
 
/* Need to resize the fb object !!! */
if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
if (var->bits_per_pixel > fb->bits_per_pixel ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
"object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
fb->width, fb->height, fb->bits_per_pixel);
return -EINVAL;
}
548,8 → 496,41
sizes.fb_width = (unsigned)-1;
sizes.fb_height = (unsigned)-1;
 
/* if the driver picks 8 or 16 bpp by default, use that
for both depth and bpp */
if (preferred_bpp != sizes.surface_bpp) {
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
}
/* first up, get a count of crtcs now in use and new min/max widths/heights */
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_cmdline_mode *cmdline_mode;
 
cmdline_mode = &fb_helper_conn->cmdline_mode;
 
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
case 8:
sizes.surface_depth = sizes.surface_bpp = 8;
break;
case 15:
sizes.surface_depth = 15;
sizes.surface_bpp = 16;
break;
case 16:
sizes.surface_depth = sizes.surface_bpp = 16;
break;
case 24:
sizes.surface_depth = sizes.surface_bpp = 24;
break;
case 32:
sizes.surface_depth = 24;
sizes.surface_bpp = 32;
break;
}
break;
}
}
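The switch encodes the usual framebuffer bpp-to-depth pairs: 8/8, 15/16, 16/16, 24/24, and depth 24 with bpp 32 for XRGB8888-style surfaces. So, for example, a hypothetical command line of video=LVDS-1:1280x800-32 would parse to cmdline_mode->bpp = 32 and come out as:

    /*
     * sizes.surface_depth = 24;   (24 meaningful color bits)
     * sizes.surface_bpp   = 32;   (stored in a 32-bit pixel)
     */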
 
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
904,7 → 885,6
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
struct drm_encoder *encoder;
struct drm_mode_set *modeset;
bool *enabled;
int width, height;
915,11 → 895,6
width = dev->mode_config.max_width;
height = dev->mode_config.max_height;
 
/* clean out all the encoder/crtc combos */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
encoder->crtc = NULL;
}
 
crtcs = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector,
992,7 → 967,7
int count = 0;
 
/* disable all the possible outputs/crtcs before entering KMS mode */
// drm_helper_disable_unused_functions(fb_helper->dev);
drm_helper_disable_unused_functions(fb_helper->dev);
 
// drm_fb_helper_parse_command_line(fb_helper);
 
/drivers/video/drm/drm_irq.c
33,7 → 33,7
* OTHER DEALINGS IN THE SOFTWARE.
*/
 
#include "drmP.h"
#include <drm/drmP.h>
#include <asm/div64.h>
//#include "drm_trace.h"
 
41,6 → 41,7
#include <linux/slab.h>
 
//#include <linux/vgaarb.h>
#include <linux/export.h>
 
/* Access macro for slots in vblank timestamp ringbuffer. */
#define vblanktimestamp(dev, crtc, count) ( \
134,3 → 135,52
(int) linedur_ns, (int) pixeldur_ns);
}
 
 
/**
* drm_vblank_pre_modeset - account for vblanks across mode sets
* @dev: DRM device
* @crtc: CRTC in question
*
* Account for vblank events across mode setting events, which will likely
* reset the hardware frame counter.
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
#if 0
/* vblank is not initialized (IRQ not installed ?) */
if (!dev->num_crtcs)
return;
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
* have the kernel take a reference on the CRTC (just once though
* to avoid corrupting the count if multiple, mismatch calls occur),
* so that interrupts remain enabled in the interim.
*/
if (!dev->vblank_inmodeset[crtc]) {
dev->vblank_inmodeset[crtc] = 0x1;
if (drm_vblank_get(dev, crtc) == 0)
dev->vblank_inmodeset[crtc] |= 0x2;
}
#endif
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);
 
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
#if 0
unsigned long irqflags;
 
if (dev->vblank_inmodeset[crtc]) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
if (dev->vblank_inmodeset[crtc] & 0x2)
drm_vblank_put(dev, crtc);
 
dev->vblank_inmodeset[crtc] = 0;
}
#endif
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
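Both bodies are compiled out with #if 0 in this port, but the intended calling pattern is worth recording for when vblank support lands. A hypothetical driver modeset path would bracket the counter-resetting work like this (crtc_set_mode stands in for the driver's own hook):

    drm_vblank_pre_modeset(dev, pipe);  /* take a vblank ref across the reset */
    crtc_set_mode(crtc, mode);          /* may zero the hardware frame counter */
    drm_vblank_post_modeset(dev, pipe); /* drop the ref, re-allow disabling */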
/drivers/video/drm/drm_mm.c
41,10 → 41,11
* Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
 
#include "drmP.h"
#include "drm_mm.h"
#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>
 
#define MM_UNUSED_TARGET 4
 
117,39 → 118,46
 
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment)
unsigned long size, unsigned alignment,
unsigned long color)
{
struct drm_mm *mm = hole_node->mm;
unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
unsigned long adj_start = hole_start;
unsigned long adj_end = hole_end;
 
BUG_ON(!hole_node->hole_follows || node->allocated);
 
if (alignment)
tmp = hole_start % alignment;
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
if (!tmp) {
if (alignment) {
unsigned tmp = adj_start % alignment;
if (tmp)
adj_start += alignment - tmp;
}
 
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
list_del_init(&hole_node->hole_stack);
} else
wasted = alignment - tmp;
list_del(&hole_node->hole_stack);
}
 
node->start = hole_start + wasted;
node->start = adj_start;
node->size = size;
node->mm = mm;
node->color = color;
node->allocated = 1;
 
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
 
BUG_ON(node->start + node->size > hole_end);
BUG_ON(node->start + node->size > adj_end);
 
node->hole_follows = 0;
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
} else {
node->hole_follows = 0;
}
}
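The adj_start % alignment round-up above is the core of hole placement: the start of the (color-adjusted) hole is bumped to the next alignment boundary before the node is carved out. A standalone check of that arithmetic, isolated from the allocator:

#include <assert.h>

/* Same round-up as in drm_mm_insert_helper(), in isolation. */
static unsigned long align_up(unsigned long start, unsigned alignment)
{
        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        start += alignment - tmp;
        }
        return start;
}

int main(void)
{
        assert(align_up(0x1001, 0x1000) == 0x2000); /* bump to next boundary */
        assert(align_up(0x2000, 0x1000) == 0x2000); /* already aligned */
        assert(align_up(0x1234, 0)      == 0x1234); /* no alignment requested */
        return 0;
}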
 
156,6 → 164,7
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
unsigned long color,
int atomic)
{
struct drm_mm_node *node;
164,7 → 173,7
if (unlikely(node == NULL))
return NULL;
 
drm_mm_insert_helper(hole_node, node, size, alignment);
drm_mm_insert_helper(hole_node, node, size, alignment, color);
 
return node;
}
180,11 → 189,11
{
struct drm_mm_node *hole_node;
 
hole_node = drm_mm_search_free(mm, size, alignment, 0);
hole_node = drm_mm_search_free(mm, size, alignment, false);
if (!hole_node)
return -ENOSPC;
 
drm_mm_insert_helper(hole_node, node, size, alignment);
drm_mm_insert_helper(hole_node, node, size, alignment, 0);
 
return 0;
}
193,44 → 202,50
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long color,
unsigned long start, unsigned long end)
{
struct drm_mm *mm = hole_node->mm;
unsigned long tmp = 0, wasted = 0;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
unsigned long hole_end = drm_mm_hole_node_end(hole_node);
unsigned long adj_start = hole_start;
unsigned long adj_end = hole_end;
 
BUG_ON(!hole_node->hole_follows || node->allocated);
 
if (hole_start < start)
wasted += start - hole_start;
if (alignment)
tmp = (hole_start + wasted) % alignment;
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
 
if (adj_start < start)
adj_start = start;
 
if (alignment) {
unsigned tmp = adj_start % alignment;
if (tmp)
wasted += alignment - tmp;
adj_start += alignment - tmp;
}
 
if (!wasted) {
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
list_del_init(&hole_node->hole_stack);
list_del(&hole_node->hole_stack);
}
 
node->start = hole_start + wasted;
node->start = adj_start;
node->size = size;
node->mm = mm;
node->color = color;
node->allocated = 1;
 
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
 
BUG_ON(node->start + node->size > hole_end);
BUG_ON(node->start + node->size > adj_end);
BUG_ON(node->start + node->size > end);
 
node->hole_follows = 0;
if (node->start + node->size < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
} else {
node->hole_follows = 0;
}
}
 
237,6 → 252,7
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
int atomic)
247,7 → 263,7
if (unlikely(node == NULL))
return NULL;
 
drm_mm_insert_helper_range(hole_node, node, size, alignment,
drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
start, end);
 
return node;
266,11 → 282,11
struct drm_mm_node *hole_node;
 
hole_node = drm_mm_search_free_in_range(mm, size, alignment,
start, end, 0);
start, end, false);
if (!hole_node)
return -ENOSPC;
 
drm_mm_insert_helper_range(hole_node, node, size, alignment,
drm_mm_insert_helper_range(hole_node, node, size, alignment, 0,
start, end);
 
return 0;
335,8 → 351,6
static int check_free_hole(unsigned long start, unsigned long end,
unsigned long size, unsigned alignment)
{
unsigned wasted = 0;
 
if (end - start < size)
return 0;
 
343,19 → 357,17
if (alignment) {
unsigned tmp = start % alignment;
if (tmp)
wasted = alignment - tmp;
start += alignment - tmp;
}
 
if (end >= start + size + wasted) {
return 1;
return end >= start + size;
}
 
return 0;
}
 
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment, int best_match)
unsigned alignment,
unsigned long color,
bool best_match)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
367,10 → 379,17
best_size = ~0UL;
 
list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
unsigned long adj_start = drm_mm_hole_node_start(entry);
unsigned long adj_end = drm_mm_hole_node_end(entry);
 
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
continue;
}
 
BUG_ON(!entry->hole_follows);
if (!check_free_hole(drm_mm_hole_node_start(entry),
drm_mm_hole_node_end(entry),
size, alignment))
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
 
if (!best_match)
384,14 → 403,15
 
return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
EXPORT_SYMBOL(drm_mm_search_free_generic);
 
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
int best_match)
bool best_match)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
409,6 → 429,13
end : drm_mm_hole_node_end(entry);
 
BUG_ON(!entry->hole_follows);
 
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
continue;
}
 
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
 
423,7 → 450,7
 
return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
 
/**
* Moves an allocation. To be used with embedded struct drm_mm_node.
436,6 → 463,7
new->mm = old->mm;
new->start = old->start;
new->size = old->size;
new->color = old->color;
 
old->allocated = 0;
new->allocated = 1;
451,9 → 479,12
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
unsigned alignment)
void drm_mm_init_scan(struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color)
{
mm->scan_color = color;
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
473,11 → 504,14
* Warning: As long as the scan list is non-empty, no other operations than
* adding/removing nodes to/from the scan list are allowed.
*/
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
void drm_mm_init_scan_with_range(struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end)
{
mm->scan_color = color;
mm->scan_alignment = alignment;
mm->scan_size = size;
mm->scanned_blocks = 0;
521,14 → 555,18
 
hole_start = drm_mm_hole_node_start(prev_node);
hole_end = drm_mm_hole_node_end(prev_node);
if (mm->scan_check_range) {
adj_start = hole_start < mm->scan_start ?
mm->scan_start : hole_start;
adj_end = hole_end > mm->scan_end ?
mm->scan_end : hole_end;
} else {
 
adj_start = hole_start;
adj_end = hole_end;
 
if (mm->color_adjust)
mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
 
if (mm->scan_check_range) {
if (adj_start < mm->scan_start)
adj_start = mm->scan_start;
if (adj_end > mm->scan_end)
adj_end = mm->scan_end;
}
 
if (check_free_hole(adj_start , adj_end,
615,6 → 653,8
mm->head_node.size = start - mm->head_node.start;
list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
 
mm->color_adjust = NULL;
 
return 0;
}
EXPORT_SYMBOL(drm_mm_init);
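Taken together, the new color-aware entry points keep the old two-step pattern: search for a suitable hole, then carve a node out of it. A hedged sketch of how a caller might drive them; MY_CACHE_COLOR is an invented constant and error handling is trimmed:

/* Sketch: allocate a 64 KiB, 4 KiB-aligned block with a cache color.
 * MY_CACHE_COLOR is hypothetical; real callers pass their own domain. */
#define MY_CACHE_COLOR 1UL

static struct drm_mm_node *alloc_colored(struct drm_mm *mm)
{
        struct drm_mm_node *hole;

        hole = drm_mm_search_free_generic(mm, 64 * 1024, 4096,
                                          MY_CACHE_COLOR, false);
        if (!hole)
                return NULL;    /* -ENOSPC for the caller to handle */

        /* non-atomic (atomic == 0) allocation of the node itself */
        return drm_mm_get_block_generic(hole, 64 * 1024, 4096,
                                        MY_CACHE_COLOR, 0);
}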
/drivers/video/drm/drm_modes.c
32,9 → 32,9
 
#include <linux/list.h>
#include <linux/list_sort.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
 
/**
* drm_mode_debug_printmodeline - debug print a mode
685,8 → 685,6
p->crtc_vsync_end /= 2;
p->crtc_vtotal /= 2;
}
 
p->crtc_vtotal |= 1;
}
 
if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
707,14 → 705,32
p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal);
p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay);
p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal);
 
p->crtc_hadjusted = false;
p->crtc_vadjusted = false;
}
EXPORT_SYMBOL(drm_mode_set_crtcinfo);
 
 
/**
* drm_mode_copy - copy the mode
* @dst: mode to overwrite
* @src: mode to copy
*
* LOCKING:
* None.
*
* Copy an existing mode into another mode, preserving the object id
* of the destination mode.
*/
void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src)
{
int id = dst->base.id;
 
*dst = *src;
dst->base.id = id;
INIT_LIST_HEAD(&dst->head);
}
EXPORT_SYMBOL(drm_mode_copy);
 
/**
* drm_mode_duplicate - allocate and duplicate an existing mode
* @mode: mode to duplicate
*
728,16 → 744,13
const struct drm_display_mode *mode)
{
struct drm_display_mode *nmode;
int new_id;
 
nmode = drm_mode_create(dev);
if (!nmode)
return NULL;
 
new_id = nmode->base.id;
*nmode = *mode;
nmode->base.id = new_id;
INIT_LIST_HEAD(&nmode->head);
drm_mode_copy(nmode, mode);
 
return nmode;
}
EXPORT_SYMBOL(drm_mode_duplicate);
/drivers/video/drm/drm_pci.c
0,0 → 1,140
/* drm_pci.c -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
/**
* \file drm_pci.c
* \brief Functions and ioctls to manage PCI memory
*
* \warning These interfaces aren't stable yet.
*
* \todo Implement the remaining ioctls for the PCI pools.
* \todo The wrappers here are so thin that they would be better off inlined.
*
* \author José Fonseca <jrfonseca@tungstengraphics.com>
* \author Leif Delgass <ldelgass@retinalburn.net>
*/
 
/*
* Copyright 2003 José Fonseca.
* Copyright 2003 Leif Delgass.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
 
//#include <linux/pci.h>
//#include <linux/slab.h>
//#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <drm/drmP.h>
 
#include <syscall.h>
 
/**********************************************************************/
/** \name PCI memory */
/*@{*/
 
/**
* \brief Allocate a PCI consistent memory block, for DMA.
*/
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
#if 1
unsigned long addr;
size_t sz;
#endif
 
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
* Return NULL here for now to make sure nobody tries for larger alignment.
*/
if (align > size)
return NULL;
 
dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
if (!dmah)
return NULL;
 
dmah->size = size;
dmah->vaddr = (void*)KernelAlloc(size);
dmah->busaddr = GetPgAddr(dmah->vaddr);
 
if (dmah->vaddr == NULL) {
kfree(dmah);
return NULL;
}
 
memset(dmah->vaddr, 0, size);
 
return dmah;
}
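Given the alignment restriction spelled out above (align must not exceed size), a typical call allocates a page-sized, page-aligned scratch buffer and hands busaddr to the device. Sketch, assuming 4 KiB pages:

/* Sketch: one page of zeroed, physically contiguous DMA memory.
 * The align > size early-out above means align <= size is required. */
static drm_dma_handle_t *alloc_scratch(struct drm_device *dev)
{
        drm_dma_handle_t *dmah = drm_pci_alloc(dev, 4096, 4096);

        if (!dmah)
                return NULL;

        /* dmah->vaddr:   CPU pointer, already zeroed by drm_pci_alloc()
         * dmah->busaddr: physical/bus address to program into the device */
        return dmah;
}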
 
 
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
int pos;
u32 lnkcap, lnkcap2;
 
*mask = 0;
if (!dev->pdev)
return -EINVAL;
 
if (!pci_is_pcie(dev->pdev))
return -EINVAL;
 
return -EINVAL;
 
#if 0
root = dev->pdev->bus->self;
 
pos = pci_pcie_cap(root);
if (!pos)
return -EINVAL;
 
/* we've been informed VIA and ServerWorks don't make the cut */
// if (root->vendor == PCI_VENDOR_ID_VIA ||
// root->vendor == PCI_VENDOR_ID_SERVERWORKS)
// return -EINVAL;
 
pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap);
pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2);
 
lnkcap &= PCI_EXP_LNKCAP_SLS;
lnkcap2 &= 0xfe;
 
if (lnkcap2) { /* PCIE GEN 3.0 */
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
*mask |= DRM_PCIE_SPEED_50;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
*mask |= DRM_PCIE_SPEED_80;
} else {
if (lnkcap & 1)
*mask |= DRM_PCIE_SPEED_25;
if (lnkcap & 2)
*mask |= DRM_PCIE_SPEED_50;
}
 
DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
return 0;
#endif
 
}
EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
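Note that in this port the function bails out with -EINVAL before the disabled probe code runs, so callers never see a populated mask yet. The intended decode pattern, sketched under that caveat:

/* Sketch: how a driver would consume the mask once the probe code is
 * enabled; today the KolibriOS port always returns -EINVAL. */
static bool link_supports_gen2(struct drm_device *dev)
{
        u32 mask;

        if (drm_pcie_get_speed_cap_mask(dev, &mask) != 0)
                return false;           /* no link cap info available */

        return (mask & DRM_PCIE_SPEED_50) != 0;
}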
/drivers/video/drm/drm_stub.c
0,0 → 1,109
/**
* \file drm_stub.c
* Stub support
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
*/
 
/*
* Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
*
* Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#include <linux/module.h>
 
#include <linux/slab.h>
#include <drm/drmP.h>
 
struct va_format {
const char *fmt;
va_list *va;
};
 
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
 
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);
 
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
EXPORT_SYMBOL(drm_timestamp_precision);
 
int drm_err(const char *func, const char *format, ...)
{
struct va_format vaf;
va_list args;
int r;
 
va_start(args, format);
 
vaf.fmt = format;
vaf.va = &args;
 
r = printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf);
 
va_end(args);
 
return r;
}
EXPORT_SYMBOL(drm_err);
 
void drm_ut_debug_printk(unsigned int request_level,
const char *prefix,
const char *function_name,
const char *format, ...)
{
va_list args;
 
// if (drm_debug & request_level) {
// if (function_name)
// printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
// va_start(args, format);
// vprintk(format, args);
// va_end(args);
// }
}
EXPORT_SYMBOL(drm_ut_debug_printk);
 
/**
* Compute size order. Returns the exponent of the smallest power of two
* which is greater than or equal to the given number.
*
* \param size size.
* \return order.
*
* \todo Can be made faster.
*/
int drm_order(unsigned long size)
{
int order;
unsigned long tmp;
 
for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
 
if (size & (size - 1))
++order;
 
return order;
}
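The loop computes floor(log2(size)) and the final power-of-two test bumps the result for non-powers-of-two, yielding ceil(log2(size)). A standalone check of the same arithmetic:

#include <assert.h>

/* Same ceil(log2(size)) computation as drm_order() above. */
static int order_of(unsigned long size)
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                ;
        if (size & (size - 1))  /* not a power of two: round up */
                ++order;
        return order;
}

int main(void)
{
        assert(order_of(1)    == 0);
        assert(order_of(4096) == 12);   /* exact power of two */
        assert(order_of(4097) == 13);   /* rounded up */
        return 0;
}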
/drivers/video/drm/i2c/i2c-algo-bit.c
15,7 → 15,8
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301 USA.
* ------------------------------------------------------------------------- */
 
/* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki
24,11 → 25,13
#include <types.h>
#include <list.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <syscall.h>
#include <errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
 
#define I2C_FUNC_NOSTART 0x00000010 /* I2C_M_NOSTART */
 
/* ----- global defines ----------------------------------------------- */
 
40,13 → 43,19
} while (0)
#else
#define bit_dbg(level, dev, format, args...) \
do {} while (0)
do { /* dbgprintf(format, ##args); */ } while (0)
#endif /* DEBUG */
 
/* ----- global variables --------------------------------------------- */
 
static int bit_test; /* see if the line-setting functions work */
static int bit_test = 0; /* see if the line-setting functions work */
 
#ifdef DEBUG
static int i2c_debug = 1;
module_param(i2c_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(i2c_debug,
"debug level - 0 off; 1 normal; 2 verbose; 3 very verbose");
#endif
 
/* --- setting states on the bus with the right timing: --------------- */
 
87,7 → 96,7
if (!adap->getscl)
goto done;
 
// start = jiffies;
start = GetTimerTicks();
while (!getscl(adap)) {
/* This hw knows how to read the clock line, so we wait
* until it actually gets high. This is safer as some
94,19 → 103,16
* chips may hold it low ("clock stretching") while they
* are processing data internally.
*/
// if (time_after(jiffies, start + adap->timeout))
// return -ETIMEDOUT;
 
udelay(adap->udelay);
 
// cond_resched();
if (time_after(GetTimerTicks(), start + adap->timeout)) {
/* Test one last time, as we may have been preempted
* between last check and timeout test.
*/
if (getscl(adap))
break;
return -ETIMEDOUT;
}
#ifdef DEBUG
if (jiffies != start && i2c_debug >= 3)
pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go "
"high\n", jiffies - start);
#endif
 
udelay(1);
}
done:
udelay(adap->udelay);
return 0;
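The last-chance re-test before returning -ETIMEDOUT guards against the caller being preempted between the line check and the deadline check. The same idiom in isolation; ticks(), line_high() and TIMEOUT are placeholders, not this driver's symbols:

/* Sketch of the preemption-safe timeout idiom used in sclhi().
 * ticks(), line_high(), TIMEOUT and the delay are all placeholders. */
static int wait_for_line_high(void)
{
        unsigned long start = ticks();

        while (!line_high()) {
                if (time_after(ticks(), start + TIMEOUT)) {
                        /* re-test once: we may have slept past the
                         * deadline while the line was already high */
                        if (line_high())
                                break;
                        return -ETIMEDOUT;
                }
                udelay(1);
        }
        return 0;
}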
239,12 → 245,14
}
 
if (adap->getscl == NULL)
pr_info("%s: Testing SDA only, SCL is not readable\n", name);
dbgprintf("%s: Testing SDA only, SCL is not readable\n", name);
 
sda = getsda(adap);
scl = (adap->getscl == NULL) ? 1 : getscl(adap);
if (!scl || !sda) {
printk(KERN_WARNING "%s: bus seems to be busy\n", name);
printk(KERN_WARNING
"%s: bus seems to be busy (scl=%d, sda=%d)\n",
name, scl, sda);
goto bailout;
}
 
303,7 → 311,7
if (adap->post_xfer)
adap->post_xfer(i2c_adap);
 
pr_info("%s: Test OK\n", name);
dbgprintf("%s: Test OK\n", name);
return 0;
bailout:
sdahi(adap);
372,7 → 380,7
* the SMBus PEC was wrong.
*/
} else if (retval == 0) {
// dev_err(&i2c_adap->dev, "sendbytes: NAK bailout.\n");
dev_err(&i2c_adap->dev, "sendbytes: NAK bailout.\n");
return -EIO;
 
/* Timeout; or (someday) lost arbitration
383,8 → 391,8
* to know or care about this ... it is *NOT* an error.
*/
} else {
// dev_err(&i2c_adap->dev, "sendbytes: error %d\n",
// retval);
dev_err(&i2c_adap->dev, "sendbytes: error %d\n",
retval);
return retval;
}
}
400,8 → 408,8
setsda(adap, 0);
udelay((adap->udelay + 1) / 2);
if (sclhi(adap) < 0) { /* timeout */
// dev_err(&i2c_adap->dev, "readbytes: ack/nak timeout\n");
// return -ETIMEDOUT;
dev_err(&i2c_adap->dev, "readbytes: ack/nak timeout\n");
return -ETIMEDOUT;
}
scllo(adap);
return 0;
433,9 → 441,9
if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) {
if (!(flags & I2C_M_NO_RD_ACK))
acknak(i2c_adap, 0);
// dev_err(&i2c_adap->dev, "readbytes: invalid "
// "block length (%d)\n", inval);
return -EREMOTEIO;
dev_err(&i2c_adap->dev, "readbytes: invalid "
"block length (%d)\n", inval);
return -EPROTO;
}
/* The original count value accounts for the extra
bytes, that is, either 1 for a regular transaction,
464,7 → 472,7
* reads, writes as well as 10bit-addresses.
* returns:
* 0 everything went okay, the chip ack'ed, or IGNORE_NAK flag was set
* -x an error occurred (like: -EREMOTEIO if the device did not answer, or
* -x an error occurred (like: -ENXIO if the device did not answer, or
* -ETIMEDOUT, for example if the lines are stuck...)
*/
static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
480,21 → 488,21
 
if (flags & I2C_M_TEN) {
/* a ten bit address */
addr = 0xf0 | ((msg->addr >> 7) & 0x03);
addr = 0xf0 | ((msg->addr >> 7) & 0x06);
bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
/* try extended address code...*/
ret = try_address(i2c_adap, addr, retries);
if ((ret != 1) && !nak_ok) {
// dev_err(&i2c_adap->dev,
// "died at extended address code\n");
return -EREMOTEIO;
dev_err(&i2c_adap->dev,
"died at extended address code\n");
return -ENXIO;
}
/* the remaining 8 bit address */
ret = i2c_outb(i2c_adap, msg->addr & 0x7f);
ret = i2c_outb(i2c_adap, msg->addr & 0xff);
if ((ret != 1) && !nak_ok) {
/* the chip did not ack / xmission error occurred */
// dev_err(&i2c_adap->dev, "died at 2nd address code\n");
return -EREMOTEIO;
dev_err(&i2c_adap->dev, "died at 2nd address code\n");
return -ENXIO;
}
if (flags & I2C_M_RD) {
bit_dbg(3, &i2c_adap->dev, "emitting repeated "
504,9 → 512,9
addr |= 0x01;
ret = try_address(i2c_adap, addr, retries);
if ((ret != 1) && !nak_ok) {
// dev_err(&i2c_adap->dev,
// "died at repeated address code\n");
return -EREMOTEIO;
dev_err(&i2c_adap->dev,
"died at repeated address code\n");
return -EIO;
}
}
} else { /* normal 7bit address */
531,7 → 539,6
int i, ret;
unsigned short nak_ok;
 
//ENTER();
if (adap->pre_xfer) {
ret = adap->pre_xfer(i2c_adap);
if (ret < 0)
565,7 → 572,7
ret, ret == 1 ? "" : "s");
if (ret < pmsg->len) {
if (ret >= 0)
ret = -EREMOTEIO;
ret = -EIO;
goto bailout;
}
} else {
576,7 → 583,7
ret, ret == 1 ? "" : "s");
if (ret < pmsg->len) {
if (ret >= 0)
ret = -EREMOTEIO;
ret = -EIO;
goto bailout;
}
}
586,7 → 593,6
bailout:
bit_dbg(3, &i2c_adap->dev, "emitting stop condition\n");
i2c_stop(adap);
// LEAVE();
 
if (adap->post_xfer)
adap->post_xfer(i2c_adap);
595,7 → 601,7
 
static u32 bit_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
604,10 → 610,11
 
/* -----exported algorithm data: ------------------------------------- */
 
static const struct i2c_algorithm i2c_bit_algo = {
const struct i2c_algorithm i2c_bit_algo = {
.master_xfer = bit_xfer,
.functionality = bit_func,
};
EXPORT_SYMBOL(i2c_bit_algo);
 
/*
* registering functions to load algorithms at runtime
620,7 → 627,7
 
if (bit_test) {
ret = test_bus(adap);
if (ret < 0)
if (bit_test >= 2 && ret < 0)
return -ENODEV;
}
 
628,6 → 635,11
adap->algo = &i2c_bit_algo;
adap->retries = 3;
 
/* Complain if SCL can't be read */
if (bit_adap->getscl == NULL) {
dev_warn(&adap->dev, "Not I2C compliant: can't read SCL\n");
dev_warn(&adap->dev, "Bus may be unreliable\n");
}
return 0;
}
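A bit-banged bus is registered by filling struct i2c_algo_bit_data with line callbacks and handing the adapter to i2c_bit_add_bus(). A hedged sketch; the gpio_* helpers are invented stand-ins for whatever drives the two lines on real hardware:

/* Sketch: wiring GPIO-driven SDA/SCL lines into the bit-banging
 * algorithm. All gpio_* functions below are hypothetical. */
static void gpio_set_sda(void *data, int state);
static void gpio_set_scl(void *data, int state);
static int  gpio_get_sda(void *data);
static int  gpio_get_scl(void *data);

static struct i2c_algo_bit_data bit_data = {
        .setsda  = gpio_set_sda,
        .setscl  = gpio_set_scl,
        .getsda  = gpio_get_sda,
        .getscl  = gpio_get_scl,   /* may be NULL: no clock stretching */
        .udelay  = 10,             /* half-period in us, ~50 kHz */
        .timeout = 100,            /* SCL-high wait, in timer ticks */
};

static struct i2c_adapter my_adapter = {
        .algo_data = &bit_data,
};

static int register_bus(void)
{
        return i2c_bit_add_bus(&my_adapter); /* sets algo, retries = 3 */
}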
 
/drivers/video/drm/i2c/i2c-core.c
14,15 → 14,20
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301 USA. */
/* ------------------------------------------------------------------------- */
 
/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>.
All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl>
SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and
Jean Delvare <khali@linux-fr.org> */
Jean Delvare <khali@linux-fr.org>
Mux support by Rodolfo Giometti <giometti@enneenne.com> and
Michael Lawnick <michael.lawnick.ext@nsn.com> */
 
#include <types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <list.h>
#include <errno.h>
#include <linux/i2c.h>
29,7 → 34,254
#include <syscall.h>

#if 0
 
static ssize_t
show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
}
 
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
 
static struct attribute *i2c_dev_attrs[] = {
&dev_attr_name.attr,
/* modalias helps coldplug: modprobe $(cat .../modalias) */
&dev_attr_modalias.attr,
NULL
};
 
static struct attribute_group i2c_dev_attr_group = {
.attrs = i2c_dev_attrs,
};
 
static const struct attribute_group *i2c_dev_attr_groups[] = {
&i2c_dev_attr_group,
NULL
};
 
static const struct dev_pm_ops i2c_device_pm_ops = {
.suspend = i2c_device_pm_suspend,
.resume = i2c_device_pm_resume,
.freeze = i2c_device_pm_freeze,
.thaw = i2c_device_pm_thaw,
.poweroff = i2c_device_pm_poweroff,
.restore = i2c_device_pm_restore,
SET_RUNTIME_PM_OPS(
pm_generic_runtime_suspend,
pm_generic_runtime_resume,
pm_generic_runtime_idle
)
};
 
struct bus_type i2c_bus_type = {
.name = "i2c",
.match = i2c_device_match,
.probe = i2c_device_probe,
.remove = i2c_device_remove,
.shutdown = i2c_device_shutdown,
.pm = &i2c_device_pm_ops,
};
EXPORT_SYMBOL_GPL(i2c_bus_type);
 
static struct device_type i2c_client_type = {
.groups = i2c_dev_attr_groups,
.uevent = i2c_device_uevent,
.release = i2c_client_dev_release,
};
 
 
/**
* i2c_verify_client - return parameter as i2c_client, or NULL
* @dev: device, probably from some driver model iterator
*
* When traversing the driver model tree, perhaps using driver model
* iterators like @device_for_each_child(), you can't assume very much
* about the nodes you find. Use this function to avoid oopses caused
* by wrongly treating some non-I2C device as an i2c_client.
*/
struct i2c_client *i2c_verify_client(struct device *dev)
{
return (dev->type == &i2c_client_type)
? to_i2c_client(dev)
: NULL;
}
EXPORT_SYMBOL(i2c_verify_client);
 
 
/* This is a permissive address validity check; I2C address map constraints
* are purposely not enforced, except for the general call address. */
static int i2c_check_client_addr_validity(const struct i2c_client *client)
{
if (client->flags & I2C_CLIENT_TEN) {
/* 10-bit address, all values are valid */
if (client->addr > 0x3ff)
return -EINVAL;
} else {
/* 7-bit address, reject the general call address */
if (client->addr == 0x00 || client->addr > 0x7f)
return -EINVAL;
}
return 0;
}
 
/* And this is a strict address validity check, used when probing. If a
* device uses a reserved address, then it shouldn't be probed. 7-bit
* addressing is assumed, 10-bit address devices are rare and should be
* explicitly enumerated. */
static int i2c_check_addr_validity(unsigned short addr)
{
/*
* Reserved addresses per I2C specification:
* 0x00 General call address / START byte
* 0x01 CBUS address
* 0x02 Reserved for different bus format
* 0x03 Reserved for future purposes
* 0x04-0x07 Hs-mode master code
* 0x78-0x7b 10-bit slave addressing
* 0x7c-0x7f Reserved for future purposes
*/
if (addr < 0x08 || addr > 0x77)
return -EINVAL;
return 0;
}
 
static int __i2c_check_addr_busy(struct device *dev, void *addrp)
{
struct i2c_client *client = i2c_verify_client(dev);
int addr = *(int *)addrp;
 
if (client && client->addr == addr)
return -EBUSY;
return 0;
}
 
/* walk up mux tree */
static int i2c_check_mux_parents(struct i2c_adapter *adapter, int addr)
{
struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
int result;
 
result = device_for_each_child(&adapter->dev, &addr,
__i2c_check_addr_busy);
 
if (!result && parent)
result = i2c_check_mux_parents(parent, addr);
 
return result;
}
 
/* recurse down mux tree */
static int i2c_check_mux_children(struct device *dev, void *addrp)
{
int result;
 
if (dev->type == &i2c_adapter_type)
result = device_for_each_child(dev, addrp,
i2c_check_mux_children);
else
result = __i2c_check_addr_busy(dev, addrp);
 
return result;
}
 
static int i2c_check_addr_busy(struct i2c_adapter *adapter, int addr)
{
struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
int result = 0;
 
if (parent)
result = i2c_check_mux_parents(parent, addr);
 
if (!result)
result = device_for_each_child(&adapter->dev, &addr,
i2c_check_mux_children);
 
return result;
}
 
/**
* i2c_lock_adapter - Get exclusive access to an I2C bus segment
* @adapter: Target I2C bus segment
*/
void i2c_lock_adapter(struct i2c_adapter *adapter)
{
struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
 
if (parent)
i2c_lock_adapter(parent);
else
rt_mutex_lock(&adapter->bus_lock);
}
EXPORT_SYMBOL_GPL(i2c_lock_adapter);
 
/**
* i2c_trylock_adapter - Try to get exclusive access to an I2C bus segment
* @adapter: Target I2C bus segment
*/
static int i2c_trylock_adapter(struct i2c_adapter *adapter)
{
struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
 
if (parent)
return i2c_trylock_adapter(parent);
else
return rt_mutex_trylock(&adapter->bus_lock);
}
 
/**
* i2c_unlock_adapter - Release exclusive access to an I2C bus segment
* @adapter: Target I2C bus segment
*/
void i2c_unlock_adapter(struct i2c_adapter *adapter)
{
struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter);
 
if (parent)
i2c_unlock_adapter(parent);
else
rt_mutex_unlock(&adapter->bus_lock);
}
EXPORT_SYMBOL_GPL(i2c_unlock_adapter);
 
#endif
 
 
/**
* i2c_transfer - execute a single or combined I2C message
* @adap: Handle to I2C bus
* @msgs: One or more messages to execute before STOP is issued to
65,22 → 317,24
 
if (adap->algo->master_xfer) {
 
/* Retry automatically on arbitration loss */
orig_jiffies = GetTimerTicks();
 
/* Retry automatically on arbitration loss */
orig_jiffies = 0;
for (ret = 0, try = 0; try <= adap->retries; try++) {
 
ret = adap->algo->master_xfer(adap, msgs, num);
if (ret != -EAGAIN)
break;
// if (time_after(jiffies, orig_jiffies + adap->timeout))
// break;
 
if (time_after(GetTimerTicks(), orig_jiffies + adap->timeout))
break;
 
delay(1);
}
// mutex_unlock(&adap->bus_lock);
return ret;
} else {
// dev_dbg(&adap->dev, "I2C level transfers not supported\n");
dbgprintf("I2C level transfers not supported\n");
return -EOPNOTSUPP;
}
}
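The classic consumer of i2c_transfer() is a register read: one write message carrying the register address, then one read message for the data, executed back to back before the STOP. Sketch:

/* Sketch: read one byte from register `reg` of a 7-bit slave.
 * Returns 0 on success, negative errno otherwise. */
static int read_reg(struct i2c_adapter *adap, u8 slave, u8 reg, u8 *val)
{
        struct i2c_msg msgs[2] = {
                { .addr = slave, .flags = 0,        .len = 1, .buf = &reg },
                { .addr = slave, .flags = I2C_M_RD, .len = 1, .buf = val  },
        };

        /* i2c_transfer() returns the number of messages processed */
        return i2c_transfer(adap, msgs, 2) == 2 ? 0 : -EIO;
}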
/drivers/video/drm/i915/i915_drm.h
File deleted
/drivers/video/drm/i915/Gtt/intel-gtt.h
File deleted
/drivers/video/drm/i915/Gtt/agp.h
37,8 → 37,8
SUPPORTED,
};
 
struct agp_memory;
 
 
#define PFX "agpgart: "
 
//#define AGP_DEBUG 1
/drivers/video/drm/i915/Gtt/intel-agp.c
109,15 → 109,10
ID(PCI_DEVICE_ID_INTEL_B43_HB),
ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB),
{ }
};
 
/drivers/video/drm/i915/Gtt/intel-agp.h
64,6 → 64,7
#define I830_PTE_SYSTEM_CACHED 0x00000006
/* GT PTE cache control fields */
#define GEN6_PTE_UNCACHED 0x00000002
#define HSW_PTE_UNCACHED 0x00000000
#define GEN6_PTE_LLC 0x00000004
#define GEN6_PTE_LLC_MLC 0x00000006
#define GEN6_PTE_GFDT 0x00000008
96,6 → 97,7
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
 
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
#define GFX_FLSH_CNTL_VLV 0x101008
 
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
211,6 → 213,7
#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
234,8 → 237,48
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
 
int intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge);
void intel_gmch_remove(struct pci_dev *pdev);
#endif
/drivers/video/drm/i915/Gtt/intel-gtt.c
19,6 → 19,7
#include <errno-base.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/export.h>
//#include <linux/pagemap.h>
//#include <linux/agp_backend.h>
//#include <asm/smp.h>
32,9 → 33,7
struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);
 
static bool intel_enable_gtt(void);
 
 
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560
51,27 → 50,7
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
 
 
static inline int pci_read_config_word(struct pci_dev *dev, int where,
u16 *val)
{
*val = PciRead16(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
u32 *val)
{
*val = PciRead32(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_write_config_word(struct pci_dev *dev, int where,
u16 val)
{
PciWrite16(dev->busnr, dev->devfn, where, val);
return 1;
}
 
/*
* If we have Intel graphics, we're not going to have anything other than
* an Intel IOMMU. So make the correct use of the PCI DMA API contingent
111,7 → 90,6
struct pci_dev *bridge_dev;
u8 __iomem *registers;
phys_addr_t gtt_bus_addr;
phys_addr_t gma_bus_addr;
u32 PGETBL_save;
u32 __iomem *gtt; /* I915G */
bool clear_fake_agp; /* on first access via agp, fill with scratch */
121,7 → 99,7
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
dma_addr_t scratch_page_dma;
int refcount;
} intel_private;
 
#define INTEL_GTT_GEN intel_private.driver->gen
132,13 → 110,13
 
static int intel_gtt_setup_scratch_page(void)
{
addr_t page;
dma_addr_t dma_addr;
 
page = AllocPage();
if (page == 0)
dma_addr = AllocPage();
if (dma_addr == 0)
return -ENOMEM;
 
intel_private.scratch_page_dma = page;
intel_private.base.scratch_page_dma = dma_addr;
intel_private.scratch_page = NULL;
 
return 0;
441,8 → 419,8
{
intel_private.driver->cleanup();
 
FreeKernelSpace(intel_private.gtt);
FreeKernelSpace(intel_private.registers);
iounmap(intel_private.gtt);
iounmap(intel_private.registers);
 
intel_gtt_teardown_scratch_page();
}
449,6 → 427,7
 
static int intel_gtt_init(void)
{
u32 gma_addr;
u32 gtt_map_size;
int ret;
 
480,13 → 459,19
 
gtt_map_size = intel_private.base.gtt_total_entries * 4;
 
intel_private.gtt = (u32*)MapIoMem(intel_private.gtt_bus_addr,
gtt_map_size, PG_SW+PG_NOCACHE);
if (!intel_private.gtt) {
intel_private.gtt = NULL;
// if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
// intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
// gtt_map_size);
if (intel_private.gtt == NULL)
intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
gtt_map_size);
if (intel_private.gtt == NULL) {
intel_private.driver->cleanup();
FreeKernelSpace(intel_private.registers);
iounmap(intel_private.registers);
return -ENOMEM;
}
intel_private.base.gtt = intel_private.gtt;
 
asm volatile("wbinvd");
 
500,8 → 485,15
return ret;
}
 
intel_enable_gtt();
if (INTEL_GTT_GEN <= 2)
pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
&gma_addr);
else
pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
&gma_addr);
 
intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
LEAVE();
 
return 0;
518,20 → 510,10
writel(addr | pte_flags, intel_private.gtt + entry);
}
 
static bool intel_enable_gtt(void)
bool intel_enable_gtt(void)
{
u32 gma_addr;
u8 __iomem *reg;
 
if (INTEL_GTT_GEN <= 2)
pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
&gma_addr);
else
pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
&gma_addr);
 
intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
if (INTEL_GTT_GEN >= 6)
return true;
 
588,19 → 570,38
return false;
}
 
void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
struct page **pages, unsigned int flags)
void intel_gtt_insert_sg_entries(struct pagelist *st,
unsigned int pg_start,
unsigned int flags)
{
int i, j;
 
j = pg_start;
 
for (i = 0; i < st->nents; i++) {
dma_addr_t addr = st->page[i];
intel_private.driver->write_entry(addr, j, flags);
j++;
}
 
readl(intel_private.gtt+j-1);
}
 
static void intel_gtt_insert_pages(unsigned int first_entry,
unsigned int num_entries,
dma_addr_t *pages,
unsigned int flags)
{
int i, j;
 
for (i = 0, j = first_entry; i < num_entries; i++, j++) {
dma_addr_t addr = (dma_addr_t)(pages[i]);
dma_addr_t addr = pages[i];
intel_private.driver->write_entry(addr,
j, flags);
}
readl(intel_private.gtt+j-1);
}
 
 
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
608,7 → 609,7
unsigned int i;
 
for (i = first_entry; i < (first_entry + num_entries); i++) {
intel_private.driver->write_entry(intel_private.scratch_page_dma,
intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
i, 0);
}
readl(intel_private.gtt+i-1);
679,6 → 680,30
return true;
}
 
static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
 
if (type_mask == AGP_USER_MEMORY)
pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
} else { /* set 'normal'/'cached' to LLC by default */
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
 
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
}
 
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
703,6 → 728,28
writel(addr | pte_flags, intel_private.gtt + entry);
}
 
static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
 
if (type_mask == AGP_USER_MEMORY)
pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
else {
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
 
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
 
writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
}
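The addr |= (addr >> 28) & 0xff0 trick in these write_entry helpers relocates physical address bits 39:32 into PTE bits 11:4, which gen6+ reserves for the high address byte (page-aligned addresses leave bits 11:0 free for flags). Worked example: for addr = 0x1234567000, addr >> 28 = 0x123, masked with 0xff0 gives 0x120, i.e. high byte 0x12 parked in bits 11:4. A standalone check:

#include <assert.h>
#include <stdint.h>

/* Same packing as the gen6/Haswell/ValleyView write_entry helpers:
 * fold bits 39:32 of a 40-bit address into PTE bits 11:4. */
static uint64_t pack_high_bits(uint64_t addr)
{
        return addr | ((addr >> 28) & 0xff0);
}

int main(void)
{
        uint64_t pte = pack_high_bits(0x1234567000ULL);

        assert((pte & 0xff0) == 0x120);  /* bits 39:32 (0x12) now in 11:4 */
        return 0;
}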
 
static void gen6_cleanup(void)
{
}
714,7 → 761,6
{
#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;
extern int intel_iommu_gfx_mapped;
 
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
730,13 → 776,16
static int i9xx_setup(void)
{
u32 reg_addr;
int size = KB(512);
 
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
 
reg_addr &= 0xfff80000;
 
intel_private.registers = (u8*)MapIoMem(reg_addr, 128 * 4096, PG_SW+PG_NOCACHE);
if (INTEL_GTT_GEN >= 7)
size = MB(2);
 
intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
 
752,6 → 801,7
switch (INTEL_GTT_GEN) {
case 5:
case 6:
case 7:
gtt_offset = MB(2);
break;
case 4:
839,6 → 889,23
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver haswell_gtt_driver = {
.gen = 6,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = haswell_write_entry,
.dma_mask_size = 40,
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver valleyview_gtt_driver = {
.gen = 7,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = valleyview_write_entry,
.dma_mask_size = 40,
.check_flags = gen6_check_flags,
};
 
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
925,6 → 992,82
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
"ValleyView", &valleyview_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ 0, NULL, NULL }
};
 
945,7 → 1088,7
return 1;
}
 
int intel_gmch_probe(struct pci_dev *pdev,
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge)
{
int i, mask;
962,11 → 1105,12
if (!intel_private.driver)
return 0;
 
// bridge->driver = &intel_fake_agp_driver;
if (bridge) {
bridge->dev_private_data = &intel_private;
bridge->dev = pdev;
bridge->dev = bridge_pdev;
}
 
intel_private.bridge_dev = pdev;
intel_private.bridge_dev = bridge_pdev;
 
dbgprintf("Intel %s Chipset\n", intel_gtt_chipsets[i].name);
 
978,11 → 1122,11
// pci_set_consistent_dma_mask(intel_private.pcidev,
// DMA_BIT_MASK(mask));
 
/*if (bridge->driver == &intel_810_driver)
return 1;*/
if (intel_gtt_init() != 0) {
// intel_gmch_remove();
 
if (intel_gtt_init() != 0)
return 0;
}
 
return 1;
}
1002,7 → 1146,7
EXPORT_SYMBOL(intel_gtt_chipset_flush);
 
 
phys_addr_t get_bus_addr(void)
{
return intel_private.gma_bus_addr;
};
//phys_addr_t get_bus_addr(void)
//{
// return intel_private.gma_bus_addr;
//};
/drivers/video/drm/i915/dvo.h
24,9 → 24,8
#define _INTEL_DVO_H
 
#include <linux/i2c.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
 
struct intel_dvo_device {
58,13 → 57,12
void (*create_resources)(struct intel_dvo_device *dvo);
 
/*
* Turn on/off output or set intermediate power levels if available.
* Turn on/off output.
*
* Unsupported intermediate modes drop to the lower power setting.
* If the mode is DPMSModeOff, the output must be disabled,
* as the DPLL may be disabled afterwards.
* Because none of our dvo drivers supports intermediate power levels,
* we don't expose this in the interface.
*/
void (*dpms)(struct intel_dvo_device *dvo, int mode);
void (*dpms)(struct intel_dvo_device *dvo, bool enable);
 
/*
* Callback for testing a video mode for a given output.
86,7 → 84,7
* buses with clock limitations.
*/
bool (*mode_fixup)(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
 
/*
115,6 → 113,12
*/
enum drm_connector_status (*detect)(struct intel_dvo_device *dvo);
 
/*
* Probe the current hw status, returning true if the connected output
* is active.
*/
bool (*get_hw_state)(struct intel_dvo_device *dev);
 
/**
* Query the device for the modes it provides.
*
140,5 → 144,6
extern struct intel_dvo_dev_ops ivch_ops;
extern struct intel_dvo_dev_ops tfp410_ops;
extern struct intel_dvo_dev_ops ch7017_ops;
extern struct intel_dvo_dev_ops ns2501_ops;
 
#endif /* _INTEL_DVO_H */
/drivers/video/drm/i915/dvo_ch7017.c
163,7 → 163,7
};
 
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);
static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable);
 
static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
{
309,7 → 309,7
lvds_power_down = CH7017_LVDS_POWER_DOWN_DEFAULT_RESERVED |
(mode->hdisplay & 0x0700) >> 8;
 
ch7017_dpms(dvo, DRM_MODE_DPMS_OFF);
ch7017_dpms(dvo, false);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT,
horizontal_active_pixel_input);
ch7017_write(dvo, CH7017_HORIZONTAL_ACTIVE_PIXEL_OUTPUT,
331,7 → 331,7
}
 
/* set the CH7017 power state */
static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
static void ch7017_dpms(struct intel_dvo_device *dvo, bool enable)
{
uint8_t val;
 
345,7 → 345,7
CH7017_DAC3_POWER_DOWN |
CH7017_TV_POWER_DOWN_EN);
 
if (mode == DRM_MODE_DPMS_ON) {
if (enable) {
/* Turn on the LVDS */
ch7017_write(dvo, CH7017_LVDS_POWER_DOWN,
val & ~CH7017_LVDS_POWER_DOWN_EN);
359,6 → 359,18
msleep(20);
}
 
static bool ch7017_get_hw_state(struct intel_dvo_device *dvo)
{
uint8_t val;
 
ch7017_read(dvo, CH7017_LVDS_POWER_DOWN, &val);
 
if (val & CH7017_LVDS_POWER_DOWN_EN)
return false;
else
return true;
}
 
static void ch7017_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
396,6 → 408,7
.mode_valid = ch7017_mode_valid,
.mode_set = ch7017_mode_set,
.dpms = ch7017_dpms,
.get_hw_state = ch7017_get_hw_state,
.dump_regs = ch7017_dump_regs,
.destroy = ch7017_destroy,
};
/drivers/video/drm/i915/dvo_ch7xxx.c
289,14 → 289,26
}
 
/* set the CH7xxx power state */
static void ch7xxx_dpms(struct intel_dvo_device *dvo, int mode)
static void ch7xxx_dpms(struct intel_dvo_device *dvo, bool enable)
{
if (mode == DRM_MODE_DPMS_ON)
if (enable)
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_DVIL | CH7xxx_PM_DVIP);
else
ch7xxx_writeb(dvo, CH7xxx_PM, CH7xxx_PM_FPD);
}
 
static bool ch7xxx_get_hw_state(struct intel_dvo_device *dvo)
{
u8 val;
 
ch7xxx_readb(dvo, CH7xxx_PM, &val);
 
if (val & (CH7xxx_PM_DVIL | CH7xxx_PM_DVIP))
return true;
else
return false;
}
 
static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
{
int i;
326,6 → 338,7
.mode_valid = ch7xxx_mode_valid,
.mode_set = ch7xxx_mode_set,
.dpms = ch7xxx_dpms,
.get_hw_state = ch7xxx_get_hw_state,
.dump_regs = ch7xxx_dump_regs,
.destroy = ch7xxx_destroy,
};
/drivers/video/drm/i915/dvo_ivch.c
288,7 → 288,7
}
 
/** Sets the power state of the panel connected to the ivch */
static void ivch_dpms(struct intel_dvo_device *dvo, int mode)
static void ivch_dpms(struct intel_dvo_device *dvo, bool enable)
{
int i;
uint16_t vr01, vr30, backlight;
297,13 → 297,13
if (!ivch_read(dvo, VR01, &vr01))
return;
 
if (mode == DRM_MODE_DPMS_ON)
if (enable)
backlight = 1;
else
backlight = 0;
ivch_write(dvo, VR80, backlight);
 
if (mode == DRM_MODE_DPMS_ON)
if (enable)
vr01 |= VR01_LCD_ENABLE | VR01_DVO_ENABLE;
else
vr01 &= ~(VR01_LCD_ENABLE | VR01_DVO_ENABLE);
315,7 → 315,7
if (!ivch_read(dvo, VR30, &vr30))
break;
 
if (((vr30 & VR30_PANEL_ON) != 0) == (mode == DRM_MODE_DPMS_ON))
if (((vr30 & VR30_PANEL_ON) != 0) == enable)
break;
udelay(1000);
}
323,6 → 323,20
udelay(16 * 1000);
}
 
static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
{
uint16_t vr01;
 
/* Set the new power state of the panel. */
if (!ivch_read(dvo, VR01, &vr01))
return false;
 
if (vr01 & VR01_LCD_ENABLE)
return true;
else
return false;
}
 
static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
413,6 → 427,7
struct intel_dvo_dev_ops ivch_ops = {
.init = ivch_init,
.dpms = ivch_dpms,
.get_hw_state = ivch_get_hw_state,
.mode_valid = ivch_mode_valid,
.mode_set = ivch_mode_set,
.detect = ivch_detect,
/drivers/video/drm/i915/dvo_ns2501.c
0,0 → 1,588
/*
*
* Copyright (c) 2012 Gilles Dartiguelongue, Thomas Richter
*
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
 
#include "dvo.h"
#include "i915_reg.h"
#include "i915_drv.h"
 
#define NS2501_VID 0x1305
#define NS2501_DID 0x6726
 
#define NS2501_VID_LO 0x00
#define NS2501_VID_HI 0x01
#define NS2501_DID_LO 0x02
#define NS2501_DID_HI 0x03
#define NS2501_REV 0x04
#define NS2501_RSVD 0x05
#define NS2501_FREQ_LO 0x06
#define NS2501_FREQ_HI 0x07
 
#define NS2501_REG8 0x08
#define NS2501_8_VEN (1<<5)
#define NS2501_8_HEN (1<<4)
#define NS2501_8_DSEL (1<<3)
#define NS2501_8_BPAS (1<<2)
#define NS2501_8_RSVD (1<<1)
#define NS2501_8_PD (1<<0)
 
#define NS2501_REG9 0x09
#define NS2501_9_VLOW (1<<7)
#define NS2501_9_MSEL_MASK (0x7<<4)
#define NS2501_9_TSEL (1<<3)
#define NS2501_9_RSEN (1<<2)
#define NS2501_9_RSVD (1<<1)
#define NS2501_9_MDI (1<<0)
 
#define NS2501_REGC 0x0c
 
struct ns2501_priv {
//I2CDevRec d;
bool quiet;
int reg_8_shadow;
int reg_8_set;
// Shadow registers for i915
int dvoc;
int pll_a;
int srcdim;
int fw_blc;
};
 
#define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
 
/*
* For reasons unclear to me, the ns2501, at least on the Fujitsu/Siemens
* laptops, does not respond on the i2c bus unless both the PLL is running
* and the display is configured in its native resolution.
* This function forces the DVO on, and stores the registers it touches.
* Afterwards, registers are restored to regular values.
*
* This is pretty much a hack, though it works.
* Without that, ns2501_readb and ns2501_writeb fail
* when switching the resolution.
*/
 
static void enable_dvo(struct intel_dvo_device *dvo)
{
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
struct i2c_adapter *adapter = dvo->i2c_bus;
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
 
DRM_DEBUG_KMS("%s: Trying to re-enable the DVO\n", __FUNCTION__);
 
ns->dvoc = I915_READ(DVO_C);
ns->pll_a = I915_READ(_DPLL_A);
ns->srcdim = I915_READ(DVOC_SRCDIM);
ns->fw_blc = I915_READ(FW_BLC);
 
I915_WRITE(DVOC, 0x10004084);
I915_WRITE(_DPLL_A, 0xd0820000);
I915_WRITE(DVOC_SRCDIM, 0x400300); // 1024x768
I915_WRITE(FW_BLC, 0x1080304);
 
I915_WRITE(DVOC, 0x90004084);
}
 
/*
* Restore the I915 registers modified by the above
* trigger function.
*/
static void restore_dvo(struct intel_dvo_device *dvo)
{
struct i2c_adapter *adapter = dvo->i2c_bus;
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 
I915_WRITE(DVOC, ns->dvoc);
I915_WRITE(_DPLL_A, ns->pll_a);
I915_WRITE(DVOC_SRCDIM, ns->srcdim);
I915_WRITE(FW_BLC, ns->fw_blc);
}
 
/*
** Read a register from the ns2501.
** Returns true if successful, false otherwise.
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
static bool ns2501_readb(struct intel_dvo_device *dvo, int addr, uint8_t * ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
u8 out_buf[2];
u8 in_buf[2];
 
struct i2c_msg msgs[] = {
{
.addr = dvo->slave_addr,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = dvo->slave_addr,
.flags = I2C_M_RD,
.len = 1,
.buf = in_buf,
}
};
 
out_buf[0] = addr;
out_buf[1] = 0;
 
if (i2c_transfer(adapter, msgs, 2) == 2) {
*ch = in_buf[0];
return true;
}
 
if (!ns->quiet) {
DRM_DEBUG_KMS
("Unable to read register 0x%02x from %s:0x%02x.\n", addr,
adapter->name, dvo->slave_addr);
}
 
return false;
}
 
/*
** Write a register to the ns2501.
** Returns true if successful, false otherwise.
** If it returns false, it might be wise to enable the
** DVO with the above function.
*/
static bool ns2501_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
struct ns2501_priv *ns = dvo->dev_priv;
struct i2c_adapter *adapter = dvo->i2c_bus;
uint8_t out_buf[2];
 
struct i2c_msg msg = {
.addr = dvo->slave_addr,
.flags = 0,
.len = 2,
.buf = out_buf,
};
 
out_buf[0] = addr;
out_buf[1] = ch;
 
if (i2c_transfer(adapter, &msg, 1) == 1) {
return true;
}
 
if (!ns->quiet) {
DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d\n",
addr, adapter->name, dvo->slave_addr);
}
 
return false;
}
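 
/*
 * Illustrative sketch (an assumption, not part of the original driver):
 * the write-and-retry idiom that ns2501_mode_set() and ns2501_dpms() below
 * spell out by hand, factored into one hypothetical helper. Everything it
 * calls is defined in this file; only the helper name is invented here.
 */
static bool ns2501_writeb_retry(struct intel_dvo_device *dvo,
int addr, uint8_t ch)
{
bool ok;
bool restore = false;
 
do {
ok = ns2501_writeb(dvo, addr, ch);
if (!ok) {
/* The chip fell off the bus: force the DVO on and retry. */
if (restore)
restore_dvo(dvo);
enable_dvo(dvo);
restore = true;
}
} while (!ok);
 
/* Undo the i915 registers the DVO trigger clobbered, if any. */
if (restore)
restore_dvo(dvo);
return ok;
}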
 
/* National Semiconductor 2501 driver for a chip on the i2c bus:
 * scan for the chip on the bus.
 * Hope the VBIOS initialized the PLL correctly so we can
 * talk to it. If not, the chip will not be seen and not detected.
 */
static bool ns2501_init(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
/* this will detect the NS2501 chip on the specified i2c bus */
struct ns2501_priv *ns;
unsigned char ch;
 
ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
if (ns == NULL)
return false;
 
dvo->i2c_bus = adapter;
dvo->dev_priv = ns;
ns->quiet = true;
 
if (!ns2501_readb(dvo, NS2501_VID_LO, &ch))
goto out;
 
if (ch != (NS2501_VID & 0xff)) {
DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
 
if (!ns2501_readb(dvo, NS2501_DID_LO, &ch))
goto out;
 
if (ch != (NS2501_DID & 0xff)) {
DRM_DEBUG_KMS("ns2501 not detected got %d: from %s Slave %d.\n",
ch, adapter->name, dvo->slave_addr);
goto out;
}
ns->quiet = false;
ns->reg_8_set = 0;
ns->reg_8_shadow =
NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
 
DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
return true;
 
out:
kfree(ns);
return false;
}
 
static enum drm_connector_status ns2501_detect(struct intel_dvo_device *dvo)
{
/*
 * This is a laptop display; it doesn't have hotplugging.
 * Even if it did, the detection bit of the 2501 is unreliable as
 * it only works for some display types.
 * It is even more unreliable as the PLL must be active to
 * allow reading from the chip.
 */
return connector_status_connected;
}
 
static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
struct drm_display_mode *mode)
{
DRM_DEBUG_KMS
("%s: is mode valid (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d)\n",
__FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
mode->vtotal);
 
/*
 * Currently, these are all the modes I have data for.
 * More might exist. It is unclear how to find the native
 * resolution of the panel in here; otherwise we could always
 * accept it by disabling the scaler.
 */
if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
(mode->hdisplay == 640 && mode->vdisplay == 480) ||
(mode->hdisplay == 1024 && mode->vdisplay == 768)) {
return MODE_OK;
} else {
return MODE_ONE_SIZE; /* Is this a reasonable error? */
}
}
 
static void ns2501_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
bool ok;
bool restore = false;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
 
DRM_DEBUG_KMS
("%s: set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
__FUNCTION__, mode->hdisplay, mode->htotal, mode->vdisplay,
mode->vtotal);
 
/*
 * Where do I find the native resolution for which scaling is not required?
 *
 * First trigger the DVO on, as otherwise the chip does not appear on the
 * i2c bus.
 */
do {
ok = true;
 
if (mode->hdisplay == 800 && mode->vdisplay == 600) {
/* mode 277 */
ns->reg_8_shadow &= ~NS2501_8_BPAS;
DRM_DEBUG_KMS("%s: switching to 800x600\n",
__FUNCTION__);
 
/*
 * No, I do not know where this data comes from.
 * It is just what the video bios left in the DVO, so
 * I'm just copying it over here.
 * This also means that I cannot support any other modes
 * except the ones supported by the bios.
 */
ok &= ns2501_writeb(dvo, 0x11, 0xc8); // 0xc7 also works.
ok &= ns2501_writeb(dvo, 0x1b, 0x19);
ok &= ns2501_writeb(dvo, 0x1c, 0x62); // VBIOS left 0x64 here, but 0x62 works nicer
ok &= ns2501_writeb(dvo, 0x1d, 0x02);
 
ok &= ns2501_writeb(dvo, 0x34, 0x03);
ok &= ns2501_writeb(dvo, 0x35, 0xff);
 
ok &= ns2501_writeb(dvo, 0x80, 0x27);
ok &= ns2501_writeb(dvo, 0x81, 0x03);
ok &= ns2501_writeb(dvo, 0x82, 0x41);
ok &= ns2501_writeb(dvo, 0x83, 0x05);
 
ok &= ns2501_writeb(dvo, 0x8d, 0x02);
ok &= ns2501_writeb(dvo, 0x8e, 0x04);
ok &= ns2501_writeb(dvo, 0x8f, 0x00);
 
ok &= ns2501_writeb(dvo, 0x90, 0xfe); /* vertical. VBIOS left 0xff here, but 0xfe works better */
ok &= ns2501_writeb(dvo, 0x91, 0x07);
ok &= ns2501_writeb(dvo, 0x94, 0x00);
ok &= ns2501_writeb(dvo, 0x95, 0x00);
 
ok &= ns2501_writeb(dvo, 0x96, 0x00);
 
ok &= ns2501_writeb(dvo, 0x99, 0x00);
ok &= ns2501_writeb(dvo, 0x9a, 0x88);
 
ok &= ns2501_writeb(dvo, 0x9c, 0x23); /* Looks like first and last line of the image. */
ok &= ns2501_writeb(dvo, 0x9d, 0x00);
ok &= ns2501_writeb(dvo, 0x9e, 0x25);
ok &= ns2501_writeb(dvo, 0x9f, 0x03);
 
ok &= ns2501_writeb(dvo, 0xa4, 0x80);
 
ok &= ns2501_writeb(dvo, 0xb6, 0x00);
 
ok &= ns2501_writeb(dvo, 0xb9, 0xc8); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
 
ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
 
ok &= ns2501_writeb(dvo, 0xc2, 0x00);
ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
 
ok &= ns2501_writeb(dvo, 0xc4, 0x03);
ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
 
ok &= ns2501_writeb(dvo, 0xc6, 0x00);
ok &= ns2501_writeb(dvo, 0xc7, 0x73);
ok &= ns2501_writeb(dvo, 0xc8, 0x02);
 
} else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
/* mode 274 */
DRM_DEBUG_KMS("%s: switching to 640x480\n",
__FUNCTION__);
/*
 * No, I do not know where this data comes from.
 * It is just what the video bios left in the DVO, so
 * I'm just copying it over here.
 * This also means that I cannot support any other modes
 * except the ones supported by the bios.
 */
ns->reg_8_shadow &= ~NS2501_8_BPAS;
 
ok &= ns2501_writeb(dvo, 0x11, 0xa0);
ok &= ns2501_writeb(dvo, 0x1b, 0x11);
ok &= ns2501_writeb(dvo, 0x1c, 0x54);
ok &= ns2501_writeb(dvo, 0x1d, 0x03);
 
ok &= ns2501_writeb(dvo, 0x34, 0x03);
ok &= ns2501_writeb(dvo, 0x35, 0xff);
 
ok &= ns2501_writeb(dvo, 0x80, 0xff);
ok &= ns2501_writeb(dvo, 0x81, 0x07);
ok &= ns2501_writeb(dvo, 0x82, 0x3d);
ok &= ns2501_writeb(dvo, 0x83, 0x05);
 
ok &= ns2501_writeb(dvo, 0x8d, 0x02);
ok &= ns2501_writeb(dvo, 0x8e, 0x10);
ok &= ns2501_writeb(dvo, 0x8f, 0x00);
 
ok &= ns2501_writeb(dvo, 0x90, 0xff); /* vertical */
ok &= ns2501_writeb(dvo, 0x91, 0x07);
ok &= ns2501_writeb(dvo, 0x94, 0x00);
ok &= ns2501_writeb(dvo, 0x95, 0x00);
 
ok &= ns2501_writeb(dvo, 0x96, 0x05);
 
ok &= ns2501_writeb(dvo, 0x99, 0x00);
ok &= ns2501_writeb(dvo, 0x9a, 0x88);
 
ok &= ns2501_writeb(dvo, 0x9c, 0x24);
ok &= ns2501_writeb(dvo, 0x9d, 0x00);
ok &= ns2501_writeb(dvo, 0x9e, 0x25);
ok &= ns2501_writeb(dvo, 0x9f, 0x03);
 
ok &= ns2501_writeb(dvo, 0xa4, 0x84);
 
ok &= ns2501_writeb(dvo, 0xb6, 0x09);
 
ok &= ns2501_writeb(dvo, 0xb9, 0xa0); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xba, 0x00); /* horizontal? */
 
ok &= ns2501_writeb(dvo, 0xc0, 0x05); /* horizontal? */
ok &= ns2501_writeb(dvo, 0xc1, 0x90);
 
ok &= ns2501_writeb(dvo, 0xc2, 0x00);
ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
 
ok &= ns2501_writeb(dvo, 0xc4, 0x03);
ok &= ns2501_writeb(dvo, 0xc5, 0x16);
 
ok &= ns2501_writeb(dvo, 0xc6, 0x00);
ok &= ns2501_writeb(dvo, 0xc7, 0x02);
ok &= ns2501_writeb(dvo, 0xc8, 0x02);
 
} else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
/* mode 280 */
DRM_DEBUG_KMS("%s: switching to 1024x768\n",
__FUNCTION__);
/*
 * This might or might not work, actually. I'm silently
 * assuming here that the native panel resolution is
 * 1024x768. If not, then this leaves the scaler disabled,
 * generating a picture that is likely not the expected one.
 *
 * The problem is that I do not know where to take the panel
 * dimensions from.
 *
 * Enable the bypass; scaling is not required.
 *
 * The scaler registers are irrelevant here.
 */
ns->reg_8_shadow |= NS2501_8_BPAS;
ok &= ns2501_writeb(dvo, 0x37, 0x44);
} else {
/*
 * Data not known. Bummer!
 * Hopefully the code does not get here,
 * as mode_valid() accepted no other modes.
 */
ns->reg_8_shadow |= NS2501_8_BPAS;
}
ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
 
if (!ok) {
if (restore)
restore_dvo(dvo);
enable_dvo(dvo);
restore = true;
}
} while (!ok);
/*
* Restore the old i915 registers before
* forcing the ns2501 on.
*/
if (restore)
restore_dvo(dvo);
}
 
/* read back the current NS2501 power state */
static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
{
unsigned char ch;
 
if (!ns2501_readb(dvo, NS2501_REG8, &ch))
return false;
 
if (ch & NS2501_8_PD)
return true;
else
return false;
}
 
/* set the NS2501 power state */
static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
{
bool ok;
bool restore = false;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
unsigned char ch;
 
DRM_DEBUG_KMS("%s: Trying set the dpms of the DVO to %i\n",
__FUNCTION__, enable);
 
ch = ns->reg_8_shadow;
 
if (enable)
ch |= NS2501_8_PD;
else
ch &= ~NS2501_8_PD;
 
if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
ns->reg_8_set = 1;
ns->reg_8_shadow = ch;
 
do {
ok = true;
ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
ok &=
ns2501_writeb(dvo, 0x34,
enable ? 0x03 : 0x00);
ok &=
ns2501_writeb(dvo, 0x35,
enable ? 0xff : 0x00);
if (!ok) {
if (restore)
restore_dvo(dvo);
enable_dvo(dvo);
restore = true;
}
} while (!ok);
 
if (restore)
restore_dvo(dvo);
}
}
 
static void ns2501_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
 
ns2501_readb(dvo, NS2501_FREQ_LO, &val);
DRM_LOG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_FREQ_HI, &val);
DRM_LOG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REG8, &val);
DRM_LOG_KMS("NS2501_REG8: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REG9, &val);
DRM_LOG_KMS("NS2501_REG9: 0x%02x\n", val);
ns2501_readb(dvo, NS2501_REGC, &val);
DRM_LOG_KMS("NS2501_REGC: 0x%02x\n", val);
}
 
static void ns2501_destroy(struct intel_dvo_device *dvo)
{
struct ns2501_priv *ns = dvo->dev_priv;
 
if (ns) {
kfree(ns);
dvo->dev_priv = NULL;
}
}
 
struct intel_dvo_dev_ops ns2501_ops = {
.init = ns2501_init,
.detect = ns2501_detect,
.mode_valid = ns2501_mode_valid,
.mode_set = ns2501_mode_set,
.dpms = ns2501_dpms,
.get_hw_state = ns2501_get_hw_state,
.dump_regs = ns2501_dump_regs,
.destroy = ns2501_destroy,
};
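 
/*
 * Illustrative sketch (an assumption, not part of the original sources):
 * the order in which a caller such as intel_dvo.c would drive the ops
 * table above. Only signatures defined in this file are used; the probe
 * function name is invented for illustration.
 */
static bool ns2501_probe_sketch(struct intel_dvo_device *dvo,
struct i2c_adapter *adapter)
{
/* Detect the chip on the bus and allocate its private data. */
if (!ns2501_ops.init(dvo, adapter))
return false;
 
/* A laptop panel: detect() unconditionally reports "connected". */
if (ns2501_ops.detect(dvo) != connector_status_connected)
return false;
 
/* Power the panel up, then read the state back from REG8. */
ns2501_ops.dpms(dvo, true);
return ns2501_ops.get_hw_state(dvo);
}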
/drivers/video/drm/i915/dvo_sil164.c
208,7 → 208,7
}
 
/* set the SIL164 power state */
static void sil164_dpms(struct intel_dvo_device *dvo, int mode)
static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
{
int ret;
unsigned char ch;
217,7 → 217,7
if (ret == false)
return;
 
if (mode == DRM_MODE_DPMS_ON)
if (enable)
ch |= SIL164_8_PD;
else
ch &= ~SIL164_8_PD;
226,6 → 226,21
return;
}
 
static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
{
int ret;
unsigned char ch;
 
ret = sil164_readb(dvo, SIL164_REG8, &ch);
if (ret == false)
return false;
 
if (ch & SIL164_8_PD)
return true;
else
return false;
}
 
static void sil164_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val;
258,6 → 273,7
.mode_valid = sil164_mode_valid,
.mode_set = sil164_mode_set,
.dpms = sil164_dpms,
.get_hw_state = sil164_get_hw_state,
.dump_regs = sil164_dump_regs,
.destroy = sil164_destroy,
};
/drivers/video/drm/i915/dvo_tfp410.c
234,7 → 234,7
}
 
/* set the tfp410 power state */
static void tfp410_dpms(struct intel_dvo_device *dvo, int mode)
static void tfp410_dpms(struct intel_dvo_device *dvo, bool enable)
{
uint8_t ctl1;
 
241,7 → 241,7
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return;
 
if (mode == DRM_MODE_DPMS_ON)
if (enable)
ctl1 |= TFP410_CTL_1_PD;
else
ctl1 &= ~TFP410_CTL_1_PD;
249,6 → 249,19
tfp410_writeb(dvo, TFP410_CTL_1, ctl1);
}
 
static bool tfp410_get_hw_state(struct intel_dvo_device *dvo)
{
uint8_t ctl1;
 
if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1))
return false;
 
if (ctl1 & TFP410_CTL_1_PD)
return true;
else
return false;
}
 
static void tfp410_dump_regs(struct intel_dvo_device *dvo)
{
uint8_t val, val2;
299,6 → 312,7
.mode_valid = tfp410_mode_valid,
.mode_set = tfp410_mode_set,
.dpms = tfp410_dpms,
.get_hw_state = tfp410_get_hw_state,
.dump_regs = tfp410_dump_regs,
.destroy = tfp410_destroy,
};
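 
/*
 * Illustrative sketch (an assumption, not part of this diff): the mapping
 * this revision applies when converting the dpms() hooks from the legacy
 * "int mode" to "bool enable". DRM_MODE_DPMS_ON is the only value that
 * enables the chip; standby, suspend and off all disable it. The wrapper
 * name is invented for illustration.
 */
static void tfp410_dpms_compat_sketch(struct intel_dvo_device *dvo, int mode)
{
tfp410_dpms(dvo, mode == DRM_MODE_DPMS_ON);
}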
/drivers/video/drm/i915/i915
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/i915/i915.map
0,0 → 1,1284
Archive member included because of file (symbol)
 
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o)
main.o (dbg_open)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(malloc.o)
pci.o (malloc)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(kref.o)
i915_gem.o (kref_init)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memset.o)
intel_bios.o (memset)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcmp.o)
intel_bios.o (memcmp)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(time.o)
intel_pm.o (jiffies_to_msecs)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcpy.o)
intel_dp.o (memcpy)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncpy.o)
intel_dp.o (strncpy)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) (vsnprintf)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(string.o)
e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o (strlcpy)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(list_sort.o)
e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o (list_sort)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(idr.o)
e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o (idr_pre_get)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncmp.o)
e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o (strncmp)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(finfo.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) (get_fileinfo)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(create.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) (create_file)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ssize.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) (set_file_size)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(write.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) (write_file)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_memmove.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcpy.o) (_memcpy)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncpy.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncpy.o) (_strncpy)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ctype.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o) (_ctype)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strnlen.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o) (strnlen)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncmp.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncmp.o) (_strncmp)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strnlen.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strnlen.o) (_strnlen)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o)
main.o (_imp__PciRead32)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000017.o)
main.o (_imp__GetService)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000036.o)
main.o (_imp__RegService)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000033.o)
pci.o (_imp__PciWrite32)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000031.o)
pci.o (_imp__PciRead8)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000029.o)
pci.o (_imp__PciRead16)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000008.o)
pci.o (_imp__Delay)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000028.o)
pci.o (_imp__PciApi)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000023.o)
pci.o (_imp__MapIoMem)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000011.o)
pci.o (_imp__FreeKernelSpace)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000032.o)
pci.o (_imp__PciWrite16)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000025.o)
i915_drv.o (_imp__MutexInit)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000002.o)
i915_gem.o (_imp__AllocPage)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000012.o)
i915_gem.o (_imp__FreePage)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000026.o)
i915_gem.o (_imp__MutexLock)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000027.o)
i915_gem.o (_imp__MutexUnlock)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000019.o)
i915_gem.o (_imp__GetTimerTicks)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000005.o)
i915_gem.o (_imp__CreateEvent)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000043.o)
i915_gem.o (_imp__WaitEvent)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000009.o)
i915_gem.o (_imp__DestroyEvent)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000001.o)
i915_gem.o (_imp__AllocKernelSpace)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000024.o)
i915_gem_gtt.o (_imp__MapPage)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000035.o)
i915_irq.o (_imp__RaiseEvent)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000004.o)
i915_irq.o (_imp__AttachIntHandler)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000034.o)
intel_panel.o (_imp__PciWrite8)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000010.o)
kms_display.o (_imp__DestroyObject)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000041.o)
kms_display.o (_imp__TimerHs)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000021.o)
kms_display.o (_imp__KernelAlloc)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000015.o)
kms_display.o (_imp__GetPgAddr)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000022.o)
kms_display.o (_imp__KernelFree)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000039.o)
kms_display.o (_imp__SetScreen)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000013.o)
kms_display.o (_imp__GetDisplay)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000040.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o) (_imp__SysMsgBoardStr)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000000.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o) (_head_core_dll)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000044.o)
e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000000.o) (core_dll_iname)
 
Allocating common symbols
Common symbol size file
 
cmd_buffer 0x4 kms_display.o
intel_agp_enabled 0x4 Gtt/intel-agp.o
x86_clflush_size 0x4 main.o
i915_lvds_channel_mode
0x4 i915_drv.o
main_device 0x4 i915_drv.o
cmd_offset 0x4 kms_display.o
 
Discarded input sections
 
.drectve 0x00000000 0x24 main.o
.drectve 0x00000000 0x44 i915_drv.o
.drectve 0x00000000 0x38 kms_display.o
.drectve 0x00000000 0x24 Gtt/intel-agp.o
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000017.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000036.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000033.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000031.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000029.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000008.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000028.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000023.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000011.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000032.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000025.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000002.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000012.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000026.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000027.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000019.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000005.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000043.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000009.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000001.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000024.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000035.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000004.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000034.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000010.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000041.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000021.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000015.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000022.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000039.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000013.o)
.text 0x00000000 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000040.o)
 
Memory Configuration
 
Name Origin Length Attributes
*default* 0x00000000 0xffffffff
 
Linker script and memory map
 
0x00000000 __image_base__ = 0x0
0x00000000 __dll__ = 0x0
0x00000000 ___ImageBase = 0x0
0x00001000 __section_alignment__ = 0x1000
0x00000200 __file_alignment__ = 0x200
0x00000004 __major_os_version__ = 0x4
0x00000000 __minor_os_version__ = 0x0
0x00000001 __major_image_version__ = 0x1
0x00000000 __minor_image_version__ = 0x0
0x00000004 __major_subsystem_version__ = 0x4
0x00000000 __minor_subsystem_version__ = 0x0
0x00000003 __subsystem__ = 0x3
0x00200000 __size_of_stack_reserve__ = 0x200000
0x00001000 __size_of_stack_commit__ = 0x1000
0x00100000 __size_of_heap_reserve__ = 0x100000
0x00001000 __size_of_heap_commit__ = 0x1000
0x00000000 __loader_flags__ = 0x0
0x00000000 __dll_characteristics__ = 0x0
0x00000268 . = SIZEOF_HEADERS
0x00001000 . = ALIGN (__section_alignment__)
 
.text 0x00001000 0x53600
*(.text)
.text 0x00001000 0x310 main.o
0x00001000 display_handler@4
0x000010a0 pci_scan_filter
0x00001100 parse_cmdline
0x00001170 drvEntry
0x000012f0 cpu_detect
.text 0x00001310 0x1120 pci.o
0x000018b0 pci_setup_device
0x00001b40 pci_scan_slot
0x00001d40 pci_find_capability
0x00001e10 enum_pci_devices
0x00001eb0 find_pci_device
0x00001f40 pci_get_device
0x00001fb0 pci_get_bus_and_slot
0x00002000 pci_get_class
0x00002050 ioport_map
0x00002070 pci_iomap
0x00002130 pci_iounmap
0x00002150 pci_enable_rom
0x000021c0 pci_disable_rom
0x00002210 pci_get_rom_size
0x000022a0 pci_map_rom
0x00002360 pci_unmap_rom
0x000023b0 pci_set_master
.text 0x00002430 0x5d0 dvo_ch7017.o
.text 0x00002a00 0x570 dvo_ch7xxx.o
.text 0x00002f70 0x700 dvo_ivch.o
.text 0x00003670 0xd80 dvo_ns2501.o
.text 0x000043f0 0x420 dvo_sil164.o
.text 0x00004810 0x5e0 dvo_tfp410.o
.text 0x00004df0 0x9c0 i915_dma.o
0x00004df0 i915_update_dri1_breadcrumb
0x00004e50 i915_driver_load
.text 0x000057b0 0x950 i915_drv.o
0x000058c0 intel_detect_pch
0x000059f0 i915_semaphore_is_enabled
0x00005a20 drm_get_dev
0x00005b20 i915_init
0x00005bf0 i915_read8
0x00005cd0 i915_read16
0x00005db0 i915_read32
0x00005e90 i915_read64
0x00005f70 i915_write8
0x00005fd0 i915_write16
0x00006030 i915_write32
0x00006090 i915_write64
.text 0x00006100 0x2e70 i915_gem.o
0x000066e0 drm_gem_object_init
0x00006750 drm_gem_object_release
0x00006760 i915_gem_get_aperture_ioctl
0x000067e0 i915_gem_check_wedge
0x00006820 i915_gem_release_mmap
0x000068f0 i915_gem_get_unfenced_gtt_alignment
0x00006930 i915_gem_object_get_pages
0x000069d0 i915_gem_object_move_to_active
0x00006b10 i915_gem_next_request_seqno
0x00006b60 i915_add_request
0x00006d60 i915_wait_seqno
0x00007010 i915_gem_reset
0x000070e0 i915_gem_retire_requests_ring
0x00007260 i915_gem_retire_requests
0x000073e0 i915_gem_object_sync
0x000074c0 i915_gpu_idle
0x00007570 i915_gem_object_put_fence
0x00007600 i915_gem_object_get_fence
0x000077b0 i915_gem_clflush_object
0x000078a0 i915_gem_object_set_to_gtt_domain
0x000079c0 i915_gem_object_finish_gpu
0x000079f0 i915_gem_object_unbind
0x00007b80 i915_gem_object_set_cache_level
0x00007d30 i915_gem_object_set_to_cpu_domain
0x00007ed0 i915_gem_object_pin
0x00008360 i915_gem_object_pin_to_display_plane
0x00008400 i915_gem_object_unpin
0x00008490 i915_gem_object_init
0x000084f0 i915_gem_alloc_object
0x000085a0 i915_gem_init_object
0x000085d0 i915_gem_free_object
0x00008750 drm_gem_object_free
0x000087a0 i915_gem_l3_remap
0x000088a0 i915_gem_init_swizzling
0x00008980 i915_gem_init_ppgtt
0x00008b70 i915_gem_init_hw
0x00008ce0 i915_gem_init
0x00008dd0 i915_gem_cleanup_ringbuffer
0x00008e10 i915_gem_load
.text 0x00008f70 0x30 i915_gem_context.o
0x00008f70 i915_gem_context_init
0x00008f90 i915_switch_context
.text 0x00008fa0 0x650 i915_gem_gtt.o
0x000090b0 i915_gem_init_aliasing_ppgtt
0x00009200 i915_gem_cleanup_aliasing_ppgtt
0x00009270 i915_ppgtt_bind_object
0x000093c0 i915_ppgtt_unbind_object
0x000093f0 i915_gem_gtt_prepare_object
0x00009400 i915_gem_gtt_bind_object
0x00009460 i915_gem_gtt_unbind_object
0x00009490 i915_gem_gtt_finish_object
0x00009530 i915_gem_init_global_gtt
.text 0x000095f0 0x3e0 i915_gem_stolen.o
0x00009680 i915_gem_cleanup_stolen
0x000096d0 i915_gem_init_stolen
.text 0x000099d0 0x170 i915_gem_tiling.o
0x000099d0 i915_gem_detect_bit_6_swizzle
.text 0x00009b40 0x11f0 i915_irq.o
0x00009bb0 i915_enable_pipestat
0x00009c20 i915_disable_pipestat
0x00009c80 i915_handle_error
0x0000a320 irq_handler_kms
0x0000a8a0 intel_irq_init
0x0000a8b0 drm_irq_install
.text 0x0000ad30 0x10b0 intel_bios.o
0x0000ae30 intel_parse_bios
0x0000bd70 intel_setup_bios
.text 0x0000bde0 0xf70 intel_crt.o
0x0000cb20 intel_crt_init
.text 0x0000cd50 0x7e0 intel_ddi.o
0x0000cd50 intel_prepare_ddi_buffers
0x0000cde0 intel_prepare_ddi
0x0000ce40 hsw_fdi_link_train
0x0000d090 intel_ddi_init
0x0000d0f0 intel_ddi_mode_set
0x0000d3b0 intel_ddi_get_hw_state
0x0000d470 intel_enable_ddi
0x0000d4d0 intel_disable_ddi
.text 0x0000d530 0xb7e0 intel_display.o
0x0000dc20 intel_crtc_load_lut
0x0000fef0 intel_dpio_read
0x0000fff0 intel_pipe_has_type
0x00010040 intel_wait_for_vblank
0x00012fa0 intel_wait_for_pipe_off
0x00013180 assert_pipe
0x00013580 intel_flush_display_plane
0x00014260 intel_pin_and_fence_fb_obj
0x00014350 intel_unpin_fb_obj
0x00014360 intel_cpt_verify_modeset
0x000153f0 intel_crtc_update_dpms
0x00015460 intel_modeset_disable
0x000154b0 intel_encoder_noop
0x000154c0 intel_encoder_destroy
0x000154e0 intel_encoder_dpms
0x00015510 intel_connector_get_hw_state
0x00015540 ironlake_init_pch_refclk
0x00015890 intel_write_eld
0x00015980 intel_crtc_fb_gamma_set
0x000159e0 intel_crtc_fb_gamma_get
0x00015a30 intel_get_load_detect_pipe
0x00015b90 intel_crtc_mode_get
0x00015f00 intel_mark_busy
0x00015f20 intel_mark_idle
0x00015f30 intel_mark_fb_busy
0x00015f80 intel_mark_fb_idle
0x000160a0 intel_encoder_check_is_cloned
0x00016120 intel_modeset_check_state
0x00016290 intel_connector_dpms
0x00016310 intel_set_mode
0x00017380 intel_release_load_detect_pipe
0x00017490 intel_get_pipe_from_crtc_id
0x000174f0 intel_framebuffer_init
0x00017620 intel_modeset_init_hw
0x00017650 intel_modeset_init
0x00018610 intel_modeset_setup_hw_state
0x00018c30 intel_modeset_gem_init
0x00018c50 intel_modeset_cleanup
0x00018c60 intel_best_encoder
0x00018c70 intel_connector_attach_encoder
0x00018c90 intel_modeset_vga_set_state
.text 0x00018d10 0x3310 intel_dp.o
0x0001b650 intel_encoder_is_pch_edp
0x0001b670 intel_edp_link_config
0x0001b6b0 intel_edp_target_clock
0x0001b6d0 intel_dp_set_m_n
0x0001b890 intel_trans_dp_port_sel
0x0001b8e0 intel_dpd_is_edp
0x0001b940 intel_dp_init
.text 0x0001c020 0x7f0 intel_dvo.o
0x0001c560 intel_dvo_init
.text 0x0001c810 0x4c0 intel_fb.o
0x0001c810 framebuffer_alloc
0x0001cbf0 intel_fbdev_init
.text 0x0001ccd0 0x1770 intel_hdmi.o
0x0001e120 enc_to_intel_hdmi
0x0001e130 intel_dip_infoframe_csum
0x0001e160 intel_hdmi_init
.text 0x0001e440 0xc20 intel_i2c.o
0x0001ee00 intel_i2c_reset
0x0001eeb0 intel_setup_gmbus
0x0001efe0 intel_gmbus_get_adapter
0x0001f020 intel_gmbus_set_speed
0x0001f040 intel_gmbus_force_bit
0x0001f050 intel_teardown_gmbus
.text 0x0001f060 0x1180 intel_lvds.o
0x0001fbe0 intel_lvds_init
.text 0x000201e0 0xc0 intel_modes.o
0x000201e0 intel_connector_update_modes
0x00020240 intel_ddc_get_modes
0x00020280 intel_attach_force_audio_property
0x00020290 intel_attach_broadcast_rgb_property
.text 0x000202a0 0x1c0 intel_opregion.o
0x000202a0 intel_opregion_setup
.text 0x00020460 0x7d0 intel_panel.o
0x000204c0 intel_fixed_panel_mode
0x00020500 intel_pch_panel_fitting
0x00020620 intel_panel_get_max_backlight
0x000208e0 intel_panel_set_backlight
0x00020910 intel_panel_disable_backlight
0x000209e0 intel_panel_enable_backlight
0x00020b20 intel_panel_detect
0x00020b40 intel_panel_setup_backlight
0x00020c20 intel_panel_destroy_backlight
.text 0x00020c30 0x5ea0 intel_pm.o
0x000248f0 intel_fbc_enabled
0x00024910 intel_enable_fbc
0x00024920 intel_disable_fbc
0x00024940 intel_update_fbc
0x00024c20 intel_update_watermarks
0x00024c40 intel_update_linetime_watermarks
0x00024c60 intel_update_sprite_watermarks
0x00024ca0 ironlake_set_drps
0x00024d40 gen6_set_rps
0x00024e80 intel_enable_rc6
0x00024f10 ironlake_teardown_rc6
0x00024f90 i915_chipset_val
0x00024fc0 i915_mch_val
0x00025150 i915_update_gfx_val
0x00025180 i915_gfx_val
0x000251b0 i915_read_mch_val
0x000251f0 i915_gpu_raise
0x00025220 i915_gpu_lower
0x00025250 i915_gpu_busy
0x000252a0 i915_gpu_turbo_disable
0x000252e0 intel_gpu_ips_init
0x000252f0 intel_gpu_ips_teardown
0x00025300 intel_disable_gt_powersave
0x000255e0 intel_enable_gt_powersave
0x00025df0 intel_init_clock_gating
0x00025e40 intel_init_power_wells
0x00025f70 intel_init_pm
0x000266b0 gen6_gt_force_wake_get
0x000266e0 gen6_gt_check_fifodbg
0x00026710 gen6_gt_force_wake_put
0x00026940 __gen6_gt_wait_for_fifo
0x000269e0 intel_gt_init
.text 0x00026ad0 0x2720 intel_ringbuffer.o
0x000277c0 intel_ring_get_active_head
0x00027830 intel_ring_setup_status_page
0x00027bd0 intel_wait_ring_buffer
0x00027da0 intel_cleanup_ring_buffer
0x00027e60 intel_ring_begin
0x00027fa0 intel_ring_advance
0x00028c30 intel_init_render_ring_buffer
0x00028f00 intel_init_bsd_ring_buffer
0x000290a0 intel_init_blt_ring_buffer
0x00029170 intel_ring_flush_all_caches
0x000291b0 intel_ring_invalidate_all_caches
.text 0x000291f0 0x3f60 intel_sdvo.o
0x0002c990 hweight16
0x0002c9d0 intel_sdvo_init
.text 0x0002d150 0x1310 intel_sprite.o
0x0002e1c0 intel_sprite_set_colorkey
0x0002e250 intel_sprite_get_colorkey
0x0002e2d0 intel_plane_init
.text 0x0002e460 0xc90 kms_display.o
0x0002e460 restore_cursor@8
0x0002e470 disable_mouse
0x0002e480 destroy_cursor
0x0002e4c0 run_workqueue@4
0x0002e510 delayed_work_timer_fn@4
0x0002e590 init_cursor
0x0002e970 set_mode
0x0002ec00 init_display_kms
0x0002ee30 get_videomodes
0x0002eef0 set_user_mode
0x0002ef70 queue_delayed_work_on
0x0002efb0 queue_delayed_work
0x0002f050 alloc_workqueue
0x0002f080 getrawmonotonic
0x0002f0b0 set_normalized_timespec
.text 0x0002f0f0 0xd0 Gtt/intel-agp.o
0x0002f0f0 agp_alloc_bridge
0x0002f130 init_agp
.text 0x0002f1c0 0xda0 Gtt/intel-gtt.o
0x0002f500 intel_enable_gtt
0x0002f660 intel_gtt_insert_sg_entries
0x0002f6c0 intel_gtt_clear_range
0x0002f710 intel_gmch_probe
0x0002ff30 intel_gtt_get
0x0002ff40 intel_gtt_chipset_flush
.text 0x0002ff60 0x110 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o
0x0002ff60 i2c_transfer
0x0002fff0 i2c_new_device
.text 0x00030070 0xb50 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-algo-bit.o
0x00030b60 i2c_bit_add_bus
.text 0x00030bc0 0x90 e:/kos/kolibri/drivers/video/drm/i915/../drm_pci.o
0x00030bc0 drm_pci_alloc
0x00030c40 drm_pcie_get_speed_cap_mask
.text 0x00030c50 0xdd0 e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o
0x00030c90 drm_mode_debug_printmodeline
0x00030d10 drm_mode_set_name
0x00030d60 drm_gtf_mode_complex
0x00030fb0 drm_gtf_mode
0x00031010 drm_cvt_mode
0x000313d0 drm_mode_list_concat
0x00031410 drm_mode_width
0x00031420 drm_mode_height
0x00031430 drm_mode_hsync
0x00031470 drm_mode_vrefresh
0x000314f0 drm_mode_set_crtcinfo
0x00031670 drm_mode_copy
0x000316b0 drm_mode_duplicate
0x000316f0 drm_mode_equal
0x000317b0 drm_mode_validate_size
0x00031810 drm_mode_validate_clocks
0x00031870 drm_mode_prune_invalid
0x00031900 drm_mode_sort
0x00031930 drm_mode_connector_list_update
.text 0x00031a20 0x80 e:/kos/kolibri/drivers/video/drm/i915/../drm_stub.o
0x00031a20 drm_err
0x00031a60 drm_ut_debug_printk
0x00031a70 drm_order
.text 0x00031aa0 0x2700 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o
0x00031bb0 drm_get_dpms_name
0x00031c10 drm_get_dvi_i_select_name
0x00031c50 drm_get_dvi_i_subconnector_name
0x00031c90 drm_get_tv_select_name
0x00031cd0 drm_get_tv_subconnector_name
0x00031d10 drm_get_dirty_info_name
0x00031d50 drm_get_encoder_name
0x00031da0 drm_get_connector_name
0x00031df0 drm_get_connector_status_name
0x00031e10 drm_mode_object_find
0x00031e90 drm_framebuffer_init
0x00031ef0 drm_framebuffer_cleanup
0x00031f40 drm_crtc_init
0x00031fe0 drm_crtc_cleanup
0x00032050 drm_mode_probed_add
0x00032070 drm_connector_init
0x000321e0 drm_connector_unplug_all
0x000321f0 drm_encoder_init
0x00032280 drm_encoder_cleanup
0x000322f0 drm_plane_init
0x00032430 drm_plane_cleanup
0x000324b0 drm_mode_create
0x00032510 drm_mode_destroy
0x00032540 drm_mode_remove
0x00032570 drm_connector_cleanup
0x00032640 drm_mode_group_init
0x000326c0 drm_mode_group_init_legacy_group
0x000327b0 drm_mode_legacy_fb_format
0x00032840 drm_mode_attachmode_crtc
0x000329c0 drm_mode_detachmode_crtc
0x00032a70 drm_property_create
0x00032b90 drm_property_create_range
0x00032c10 drm_property_add_enum
0x00032d30 drm_property_destroy
0x00032dd0 drm_property_create_bitmask
0x00032e60 drm_property_create_enum
0x00032ef0 drm_mode_config_init
0x00033040 drm_mode_create_dirty_info_property
0x000330a0 drm_mode_create_dithering_property
0x00033100 drm_mode_create_scaling_mode_property
0x00033160 drm_mode_create_tv_properties
0x000334e0 drm_mode_create_dvi_i_properties
0x00033570 drm_mode_config_cleanup
0x000336a0 drm_connector_attach_property
0x000336f0 drm_connector_property_set_value
0x00033750 drm_connector_property_get_value
0x000337b0 drm_object_attach_property
0x00033800 drm_object_property_set_value
0x00033860 drm_object_property_get_value
0x000338c0 drm_mode_connector_update_edid_property
0x00033a70 drm_mode_connector_attach_encoder
0x00033ad0 drm_mode_connector_detach_encoder
0x00033b30 drm_mode_crtc_set_gamma_size
0x00033ba0 drm_mode_config_reset
0x00033c50 drm_fb_get_bpp_depth
0x00033ea0 drm_format_num_planes
0x00033f40 drm_format_plane_cpp
0x000340b0 drm_format_horz_chroma_subsampling
0x00034150 drm_format_vert_chroma_subsampling
.text 0x000341a0 0x1640 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc_helper.o
0x00034250 drm_helper_probe_single_connector_modes
0x00034510 drm_helper_encoder_in_use
0x00034560 drm_helper_crtc_in_use
0x000345d0 drm_helper_disable_unused_functions
0x00034700 drm_crtc_helper_set_mode
0x00034b50 drm_crtc_helper_set_config
0x00035540 drm_helper_connector_dpms
0x00035640 drm_helper_mode_fill_fb_struct
0x000356a0 drm_helper_resume_force_mode
.text 0x000357e0 0x2a50 e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o
0x00036680 drm_edid_header_is_valid
0x000366b0 drm_edid_block_valid
0x000367d0 drm_edid_is_valid
0x00036830 drm_probe_ddc
0x00036860 drm_get_edid
0x00036b40 drm_mode_find_dmt
0x00037240 drm_find_cea_extension
0x00037290 drm_edid_to_eld
0x00037710 drm_av_sync_delay
0x00037790 drm_select_eld
0x000377e0 drm_detect_hdmi_monitor
0x000378a0 drm_detect_monitor_audio
0x000379b0 drm_add_edid_modes
0x00038170 drm_add_modes_noedid
.text 0x00038230 0x2b0 e:/kos/kolibri/drivers/video/drm/i915/../drm_irq.o
0x00038230 div64_u64
0x000382b0 drm_calc_timestamping_constants
0x000384c0 drm_vblank_pre_modeset
0x000384d0 drm_vblank_post_modeset
.text 0x000384e0 0x280 e:/kos/kolibri/drivers/video/drm/i915/../drm_dp_i2c_helper.o
0x000386e0 i2c_dp_aux_add_bus
.text 0x00038760 0xe00 e:/kos/kolibri/drivers/video/drm/i915/../drm_mm.o
0x00038b20 drm_mm_pre_get
0x00038b90 drm_mm_get_block_generic
0x00038be0 drm_mm_get_block_range_generic
0x00038c30 drm_mm_remove_node
0x00038d70 drm_mm_put_block
0x00038dd0 drm_mm_search_free_generic
0x00038f20 drm_mm_insert_node
0x00038f80 drm_mm_search_free_in_range_generic
0x000390e0 drm_mm_insert_node_in_range
0x00039170 drm_mm_replace_node
0x000391e0 drm_mm_init_scan
0x00039220 drm_mm_init_scan_with_range
0x00039270 drm_mm_scan_add_block
0x000393b0 drm_mm_scan_remove_block
0x00039440 drm_mm_clean
0x00039460 drm_mm_init
0x000394c0 drm_mm_takedown
.text 0x00039560 0x16f0 e:/kos/kolibri/drivers/video/drm/i915/../drm_fb_helper.o
0x000398f0 drm_fb_helper_single_add_all_connectors
0x000399a0 drm_fb_helper_blank
0x00039a20 drm_fb_helper_init
0x00039c80 drm_fb_helper_setcmap
0x00039fe0 drm_fb_helper_check_var
0x0003a220 drm_fb_helper_set_par
0x0003a2d0 drm_fb_helper_pan_display
0x0003a370 drm_fb_helper_single_fb_probe
0x0003a600 drm_fb_helper_fill_fix
0x0003a670 drm_fb_helper_fill_var
0x0003a810 drm_fb_helper_initial_config
.text 0x0003ac50 0x1d8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o)
0x0003ac50 dbg_open
0x0003acba printf
0x0003ad03 dbgprintf
0x0003ad98 xf86DrvMsg
.text 0x0003ae28 0x2274 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(malloc.o)
0x0003b29e malloc
0x0003c675 free
0x0003cf45 memalign
.text 0x0003d09c 0x3c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(kref.o)
0x0003d09c kref_set
0x0003d0a7 kref_init
0x0003d0b2 kref_get
0x0003d0b9 kref_put
*fill* 0x0003d0d8 0x8 00
.text 0x0003d0e0 0x50 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memset.o)
0x0003d0e0 memset
.text 0x0003d130 0x60 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcmp.o)
0x0003d130 memcmp
.text 0x0003d190 0x4c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(time.o)
0x0003d190 jiffies_to_msecs
0x0003d196 jiffies_to_usecs
0x0003d19f msecs_to_jiffies
0x0003d1b9 usecs_to_jiffies
*fill* 0x0003d1dc 0x4 00
.text 0x0003d1e0 0x20 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcpy.o)
0x0003d1e0 memcpy
.text 0x0003d200 0x20 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncpy.o)
0x0003d200 strncpy
.text 0x0003d220 0x12dc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o)
0x0003dda9 simple_strtoull
0x0003de8a simple_strtoul
0x0003dead simple_strtol
0x0003dee9 simple_strtoll
0x0003df26 strict_strtoul
0x0003df82 strict_strtol
0x0003dfca strict_strtoull
0x0003e030 strict_strtoll
0x0003e07f vsnprintf
0x0003e479 vsprintf
0x0003e4a4 snprintf
0x0003e4cf sprintf
.text 0x0003e4fc 0x3c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(string.o)
0x0003e4fc strlcpy
.text 0x0003e538 0x130 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(list_sort.o)
0x0003e538 list_sort
.text 0x0003e668 0x638 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(idr.o)
0x0003e682 find_first_bit
0x0003e6c9 find_next_bit
0x0003e95b idr_pre_get
0x0003e9a8 idr_get_new_above
0x0003e9e7 idr_get_new
0x0003ea12 idr_remove
0x0003eb27 idr_remove_all
0x0003ebb5 idr_destroy
0x0003ebd5 idr_find
0x0003ec8b idr_init_cache
0x0003ec8c idr_init
.text 0x0003eca0 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncmp.o)
0x0003eca0 strncmp
.text 0x0003ecb0 0x28 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(finfo.o)
0x0003ecb0 get_fileinfo
.text 0x0003ecd8 0x28 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(create.o)
0x0003ecd8 create_file
.text 0x0003ed00 0x28 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ssize.o)
0x0003ed00 set_file_size
.text 0x0003ed28 0x3c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(write.o)
0x0003ed28 write_file
*fill* 0x0003ed64 0xc 00
.text 0x0003ed70 0x60 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_memmove.o)
0x0003ed70 _memmove
0x0003ed86 _memcpy
.text 0x0003edd0 0x20 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncpy.o)
0x0003edd0 _strncpy
.text 0x0003edf0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ctype.o)
.text 0x0003edf0 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strnlen.o)
0x0003edf0 strnlen
.text 0x0003ee00 0x30 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncmp.o)
0x0003ee00 _strncmp
.text 0x0003ee30 0x20 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strnlen.o)
0x0003ee30 _strnlen
*(.rdata)
.rdata 0x0003ee50 0x74 main.o
.rdata 0x0003eec4 0x16c pci.o
.rdata 0x0003f030 0x270 dvo_ch7017.o
.rdata 0x0003f2a0 0x160 dvo_ch7xxx.o
.rdata 0x0003f400 0x204 dvo_ivch.o
.rdata 0x0003f604 0x314 dvo_ns2501.o
.rdata 0x0003f918 0x178 dvo_sil164.o
.rdata 0x0003fa90 0x270 dvo_tfp410.o
.rdata 0x0003fd00 0x34c i915_dma.o
*fill* 0x0004004c 0x14 00
.rdata 0x00040060 0xa80 i915_drv.o
.rdata 0x00040ae0 0x3c0 i915_gem.o
.rdata 0x00040ea0 0x60 i915_gem_gtt.o
.rdata 0x00040f00 0xd8 i915_gem_stolen.o
.rdata 0x00040fd8 0x50 i915_gem_tiling.o
.rdata 0x00041028 0x408 i915_irq.o
.rdata 0x00041430 0x55c intel_bios.o
*fill* 0x0004198c 0x14 00
.rdata 0x000419a0 0x360 intel_crt.o
.rdata 0x00041d00 0x14e0 intel_ddi.o
.rdata 0x000431e0 0x2040 intel_display.o
.rdata 0x00045220 0xa60 intel_dp.o
.rdata 0x00045c80 0x180 intel_dvo.o
.rdata 0x00045e00 0x80 intel_fb.o
.rdata 0x00045e80 0x180 intel_hdmi.o
.rdata 0x00046000 0x160 intel_i2c.o
.rdata 0x00046160 0x260 intel_lvds.o
.rdata 0x000463c0 0xec intel_opregion.o
.rdata 0x000464ac 0x84 intel_panel.o
*fill* 0x00046530 0x10 00
.rdata 0x00046540 0x1680 intel_pm.o
.rdata 0x00047bc0 0x2c0 intel_ringbuffer.o
.rdata 0x00047e80 0x2880 intel_sdvo.o
.rdata 0x0004a700 0x40 intel_sprite.o
.rdata 0x0004a740 0x148 kms_display.o
.rdata 0x0004a888 0x1c Gtt/intel-agp.o
*fill* 0x0004a8a4 0x1c 00
.rdata 0x0004a8c0 0x9a0 Gtt/intel-gtt.o
.rdata 0x0004b260 0x24 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o
.rdata 0x0004b284 0x1a8 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-algo-bit.o
0x0004b3e4 i2c_bit_algo
.rdata 0x0004b42c 0x70 e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o
.rdata 0x0004b49c 0x18 e:/kos/kolibri/drivers/video/drm/i915/../drm_stub.o
*fill* 0x0004b4b4 0xc 00
.rdata 0x0004b4c0 0x300 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o
.rdata 0x0004b7c0 0x3f8 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc_helper.o
*fill* 0x0004bbb8 0x8 00
.rdata 0x0004bbc0 0x80c0 e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o
.rdata 0x00053c80 0xe0 e:/kos/kolibri/drivers/video/drm/i915/../drm_irq.o
.rdata 0x00053d60 0x2c e:/kos/kolibri/drivers/video/drm/i915/../drm_dp_i2c_helper.o
*fill* 0x00053d8c 0x14 00
.rdata 0x00053da0 0x160 e:/kos/kolibri/drivers/video/drm/i915/../drm_mm.o
.rdata 0x00053f00 0x38c e:/kos/kolibri/drivers/video/drm/i915/../drm_fb_helper.o
.rdata 0x0005428c 0x80 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(malloc.o)
.rdata 0x0005430c 0x12c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o)
0x000543f4 hex_asc
.rdata 0x00054438 0x74 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(idr.o)
.rdata 0x000544ac 0x100 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ctype.o)
0x000544ac _ctype
 
.eh_frame 0x00055000 0xa400
.eh_frame 0x00055000 0xdc main.o
.eh_frame 0x000550dc 0x3d0 pci.o
.eh_frame 0x000554ac 0x174 dvo_ch7017.o
.eh_frame 0x00055620 0x188 dvo_ch7xxx.o
.eh_frame 0x000557a8 0x198 dvo_ivch.o
.eh_frame 0x00055940 0x24c dvo_ns2501.o
.eh_frame 0x00055b8c 0x138 dvo_sil164.o
.eh_frame 0x00055cc4 0x164 dvo_tfp410.o
.eh_frame 0x00055e28 0x88 i915_dma.o
.eh_frame 0x00055eb0 0x23c i915_drv.o
.eh_frame 0x000560ec 0x8dc i915_gem.o
.eh_frame 0x000569c8 0x40 i915_gem_context.o
.eh_frame 0x00056a08 0x214 i915_gem_gtt.o
.eh_frame 0x00056c1c 0xa8 i915_gem_stolen.o
.eh_frame 0x00056cc4 0x40 i915_gem_tiling.o
.eh_frame 0x00056d04 0x184 i915_irq.o
.eh_frame 0x00056e88 0xc8 intel_bios.o
.eh_frame 0x00056f50 0x2d0 intel_crt.o
.eh_frame 0x00057220 0x1cc intel_ddi.o
.eh_frame 0x000573ec 0x1344 intel_display.o
.eh_frame 0x00058730 0x92c intel_dp.o
.eh_frame 0x0005905c 0x210 intel_dvo.o
.eh_frame 0x0005926c 0x9c intel_fb.o
.eh_frame 0x00059308 0x50c intel_hdmi.o
.eh_frame 0x00059814 0x290 intel_i2c.o
.eh_frame 0x00059aa4 0x210 intel_lvds.o
.eh_frame 0x00059cb4 0x90 intel_modes.o
.eh_frame 0x00059d44 0x44 intel_opregion.o
.eh_frame 0x00059d88 0x1ac intel_panel.o
.eh_frame 0x00059f34 0xfb0 intel_pm.o
.eh_frame 0x0005aee4 0x6ec intel_ringbuffer.o
.eh_frame 0x0005b5d0 0x604 intel_sdvo.o
.eh_frame 0x0005bbd4 0x2f4 intel_sprite.o
.eh_frame 0x0005bec8 0x320 kms_display.o
.eh_frame 0x0005c1e8 0x5c Gtt/intel-agp.o
.eh_frame 0x0005c244 0x250 Gtt/intel-gtt.o
.eh_frame 0x0005c494 0x7c e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o
.eh_frame 0x0005c510 0x23c e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-algo-bit.o
.eh_frame 0x0005c74c 0x5c e:/kos/kolibri/drivers/video/drm/i915/../drm_pci.o
.eh_frame 0x0005c7a8 0x334 e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o
.eh_frame 0x0005cadc 0x58 e:/kos/kolibri/drivers/video/drm/i915/../drm_stub.o
.eh_frame 0x0005cb34 0x9f4 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o
.eh_frame 0x0005d528 0x2c8 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc_helper.o
.eh_frame 0x0005d7f0 0x6ac e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o
.eh_frame 0x0005de9c 0x94 e:/kos/kolibri/drivers/video/drm/i915/../drm_irq.o
.eh_frame 0x0005df30 0x8c e:/kos/kolibri/drivers/video/drm/i915/../drm_dp_i2c_helper.o
.eh_frame 0x0005dfbc 0x380 e:/kos/kolibri/drivers/video/drm/i915/../drm_mm.o
.eh_frame 0x0005e33c 0x378 e:/kos/kolibri/drivers/video/drm/i915/../drm_fb_helper.o
.eh_frame 0x0005e6b4 0xb8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o)
.eh_frame 0x0005e76c 0x170 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(malloc.o)
.eh_frame 0x0005e8dc 0x6c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(kref.o)
.eh_frame 0x0005e948 0x68 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(time.o)
.eh_frame 0x0005e9b0 0x530 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o)
.eh_frame 0x0005eee0 0x44 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(string.o)
.eh_frame 0x0005ef24 0x58 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(list_sort.o)
.eh_frame 0x0005ef7c 0x23c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(idr.o)
.eh_frame 0x0005f1b8 0x34 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(finfo.o)
.eh_frame 0x0005f1ec 0x34 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(create.o)
.eh_frame 0x0005f220 0x34 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ssize.o)
.eh_frame 0x0005f254 0x3c e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(write.o)
 
.data 0x00060000 0xa00
*(.data)
.data 0x00060000 0x4 main.o
0x00060000 i915_modeset
.data 0x00060004 0x8 pci.o
*fill* 0x0006000c 0x14 00
.data 0x00060020 0x40 dvo_ch7017.o
0x00060020 ch7017_ops
.data 0x00060060 0x40 dvo_ch7xxx.o
0x00060060 ch7xxx_ops
.data 0x000600a0 0x40 dvo_ivch.o
0x000600a0 ivch_ops
.data 0x000600e0 0x40 dvo_ns2501.o
0x000600e0 ns2501_ops
.data 0x00060120 0x40 dvo_sil164.o
0x00060120 sil164_ops
.data 0x00060160 0x40 dvo_tfp410.o
0x00060160 tfp410_ops
.data 0x000601a0 0x0 i915_dma.o
.data 0x000601a0 0x10 i915_drv.o
0x000601a0 i915_preliminary_hw_support
0x000601a4 i915_vbt_sdvo_panel_type
0x000601a8 i915_panel_use_ssc
0x000601ac i915_semaphores
.data 0x000601b0 0x0 i915_gem.o
.data 0x000601b0 0x0 i915_gem_context.o
.data 0x000601b0 0x0 i915_gem_gtt.o
.data 0x000601b0 0x0 i915_gem_stolen.o
.data 0x000601b0 0x0 i915_gem_tiling.o
.data 0x000601b0 0x0 i915_irq.o
.data 0x000601b0 0x0 intel_bios.o
.data 0x000601b0 0x0 intel_crt.o
.data 0x000601b0 0x0 intel_ddi.o
*fill* 0x000601b0 0x10 00
.data 0x000601c0 0xc0 intel_display.o
.data 0x00060280 0x0 intel_dp.o
.data 0x00060280 0x0 intel_dvo.o
.data 0x00060280 0x80 intel_fb.o
.data 0x00060300 0x0 intel_hdmi.o
.data 0x00060300 0x0 intel_i2c.o
.data 0x00060300 0x0 intel_lvds.o
.data 0x00060300 0x0 intel_modes.o
.data 0x00060300 0x0 intel_opregion.o
.data 0x00060300 0x0 intel_panel.o
.data 0x00060300 0x0 intel_pm.o
.data 0x00060300 0x0 intel_ringbuffer.o
.data 0x00060300 0x0 intel_sdvo.o
.data 0x00060300 0x2c intel_sprite.o
.data 0x0006032c 0x0 kms_display.o
*fill* 0x0006032c 0x14 00
.data 0x00060340 0x360 Gtt/intel-agp.o
.data 0x000606a0 0x0 Gtt/intel-gtt.o
.data 0x000606a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o
.data 0x000606a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-algo-bit.o
.data 0x000606a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_pci.o
.data 0x000606a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o
.data 0x000606a0 0x8 e:/kos/kolibri/drivers/video/drm/i915/../drm_stub.o
0x000606a0 drm_timestamp_precision
0x000606a4 drm_vblank_offdelay
*fill* 0x000606a8 0x18 00
.data 0x000606c0 0x1e0 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o
.data 0x000608a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc_helper.o
.data 0x000608a0 0x100 e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o
.data 0x000609a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_irq.o
.data 0x000609a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_dp_i2c_helper.o
.data 0x000609a0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_mm.o
.data 0x000609a0 0x8 e:/kos/kolibri/drivers/video/drm/i915/../drm_fb_helper.o
.data 0x000609a8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o)
.data 0x000609a8 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(malloc.o)
.data 0x000609b4 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(kref.o)
.data 0x000609b4 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memset.o)
.data 0x000609b4 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcmp.o)
.data 0x000609b4 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(time.o)
.data 0x000609b4 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcpy.o)
.data 0x000609b4 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncpy.o)
.data 0x000609b4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o)
0x000609b4 kptr_restrict
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(string.o)
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(list_sort.o)
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(idr.o)
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncmp.o)
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(finfo.o)
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(create.o)
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ssize.o)
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(write.o)
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_memmove.o)
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncpy.o)
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ctype.o)
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strnlen.o)
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncmp.o)
.data 0x000609b8 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strnlen.o)
 
.bss 0x00061000 0x858
*(.bss)
.bss 0x00061000 0x100 main.o
.bss 0x00061100 0x0 pci.o
.bss 0x00061100 0x0 dvo_ch7017.o
.bss 0x00061100 0x0 dvo_ch7xxx.o
.bss 0x00061100 0x0 dvo_ivch.o
.bss 0x00061100 0x0 dvo_ns2501.o
.bss 0x00061100 0x0 dvo_sil164.o
.bss 0x00061100 0x0 dvo_tfp410.o
.bss 0x00061100 0x0 i915_dma.o
.bss 0x00061100 0x200 i915_drv.o
0x00061100 i915_enable_ppgtt
0x00061104 i915_enable_hangcheck
0x00061108 i915_lvds_downclock
0x0006110c i915_enable_fbc
0x00061110 i915_enable_rc6
0x00061114 i915_powersave
0x00061118 i915_panel_ignore_lid
.bss 0x00061300 0x0 i915_gem.o
.bss 0x00061300 0x0 i915_gem_context.o
.bss 0x00061300 0x0 i915_gem_gtt.o
.bss 0x00061300 0x0 i915_gem_stolen.o
.bss 0x00061300 0x0 i915_gem_tiling.o
.bss 0x00061300 0x4 i915_irq.o
.bss 0x00061304 0x4 intel_bios.o
.bss 0x00061308 0x0 intel_crt.o
.bss 0x00061308 0x0 intel_ddi.o
.bss 0x00061308 0x0 intel_display.o
.bss 0x00061308 0x0 intel_dp.o
.bss 0x00061308 0x0 intel_dvo.o
*fill* 0x00061308 0x18 00
.bss 0x00061320 0x40 intel_fb.o
.bss 0x00061360 0x0 intel_hdmi.o
.bss 0x00061360 0x0 intel_i2c.o
.bss 0x00061360 0x0 intel_lvds.o
.bss 0x00061360 0x0 intel_modes.o
.bss 0x00061360 0x0 intel_opregion.o
.bss 0x00061360 0x0 intel_panel.o
.bss 0x00061360 0x8 intel_pm.o
0x00061360 mchdev_lock
.bss 0x00061368 0x0 intel_ringbuffer.o
.bss 0x00061368 0x0 intel_sdvo.o
.bss 0x00061368 0x0 intel_sprite.o
.bss 0x00061368 0x4 kms_display.o
*fill* 0x0006136c 0x14 00
.bss 0x00061380 0x1e0 Gtt/intel-agp.o
.bss 0x00061560 0x80 Gtt/intel-gtt.o
.bss 0x000615e0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o
.bss 0x000615e0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-algo-bit.o
.bss 0x000615e0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_pci.o
.bss 0x000615e0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o
.bss 0x000615e0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../drm_stub.o
0x000615e0 drm_debug
*fill* 0x000615e4 0x1c 00
.bss 0x00061600 0x40 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o
.bss 0x00061640 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc_helper.o
.bss 0x00061640 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o
.bss 0x00061640 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_irq.o
.bss 0x00061640 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_dp_i2c_helper.o
.bss 0x00061640 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_mm.o
.bss 0x00061640 0x0 e:/kos/kolibri/drivers/video/drm/i915/../drm_fb_helper.o
.bss 0x00061640 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(dbglog.o)
.bss 0x00061648 0x1f8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(malloc.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(kref.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memset.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcmp.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(time.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(memcpy.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncpy.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(vsprintf.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(string.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(list_sort.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(idr.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strncmp.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(finfo.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(create.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ssize.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(write.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_memmove.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncpy.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(ctype.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(strnlen.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strncmp.o)
.bss 0x00061840 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a(_strnlen.o)
*(COMMON)
COMMON 0x00061840 0x4 main.o
0x00061840 x86_clflush_size
COMMON 0x00061844 0x8 i915_drv.o
0x00061844 i915_lvds_channel_mode
0x00061848 main_device
COMMON 0x0006184c 0x8 kms_display.o
0x0006184c cmd_buffer
0x00061850 cmd_offset
COMMON 0x00061854 0x4 Gtt/intel-agp.o
0x00061854 intel_agp_enabled
 
/DISCARD/
*(.debug$S)
*(.debug$T)
*(.debug$F)
*(.drectve)
*(.edata)
 
.idata 0x00062000 0x400
SORT(*)(.idata$2)
.idata$2 0x00062000 0x14 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000000.o)
0x00062000 _head_core_dll
SORT(*)(.idata$3)
0x00062014 0x4 LONG 0x0
0x00062018 0x4 LONG 0x0
0x0006201c 0x4 LONG 0x0
0x00062020 0x4 LONG 0x0
0x00062024 0x4 LONG 0x0
SORT(*)(.idata$4)
.idata$4 0x00062028 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000000.o)
.idata$4 0x00062028 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000001.o)
.idata$4 0x0006202c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000002.o)
.idata$4 0x00062030 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000004.o)
.idata$4 0x00062034 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000005.o)
.idata$4 0x00062038 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000008.o)
.idata$4 0x0006203c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000009.o)
.idata$4 0x00062040 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000010.o)
.idata$4 0x00062044 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000011.o)
.idata$4 0x00062048 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000012.o)
.idata$4 0x0006204c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000013.o)
.idata$4 0x00062050 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000015.o)
.idata$4 0x00062054 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000017.o)
.idata$4 0x00062058 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000019.o)
.idata$4 0x0006205c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000021.o)
.idata$4 0x00062060 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000022.o)
.idata$4 0x00062064 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000023.o)
.idata$4 0x00062068 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000024.o)
.idata$4 0x0006206c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000025.o)
.idata$4 0x00062070 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000026.o)
.idata$4 0x00062074 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000027.o)
.idata$4 0x00062078 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000028.o)
.idata$4 0x0006207c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000029.o)
.idata$4 0x00062080 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o)
.idata$4 0x00062084 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000031.o)
.idata$4 0x00062088 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000032.o)
.idata$4 0x0006208c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000033.o)
.idata$4 0x00062090 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000034.o)
.idata$4 0x00062094 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000035.o)
.idata$4 0x00062098 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000036.o)
.idata$4 0x0006209c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000039.o)
.idata$4 0x000620a0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000040.o)
.idata$4 0x000620a4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000041.o)
.idata$4 0x000620a8 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000043.o)
.idata$4 0x000620ac 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000044.o)
SORT(*)(.idata$5)
.idata$5 0x000620b0 0x0 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000000.o)
.idata$5 0x000620b0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000001.o)
0x000620b0 _imp__AllocKernelSpace
.idata$5 0x000620b4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000002.o)
0x000620b4 _imp__AllocPage
.idata$5 0x000620b8 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000004.o)
0x000620b8 _imp__AttachIntHandler
.idata$5 0x000620bc 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000005.o)
0x000620bc _imp__CreateEvent
.idata$5 0x000620c0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000008.o)
0x000620c0 _imp__Delay
.idata$5 0x000620c4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000009.o)
0x000620c4 _imp__DestroyEvent
.idata$5 0x000620c8 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000010.o)
0x000620c8 _imp__DestroyObject
.idata$5 0x000620cc 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000011.o)
0x000620cc _imp__FreeKernelSpace
.idata$5 0x000620d0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000012.o)
0x000620d0 _imp__FreePage
.idata$5 0x000620d4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000013.o)
0x000620d4 _imp__GetDisplay
.idata$5 0x000620d8 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000015.o)
0x000620d8 _imp__GetPgAddr
.idata$5 0x000620dc 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000017.o)
0x000620dc _imp__GetService
.idata$5 0x000620e0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000019.o)
0x000620e0 _imp__GetTimerTicks
.idata$5 0x000620e4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000021.o)
0x000620e4 _imp__KernelAlloc
.idata$5 0x000620e8 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000022.o)
0x000620e8 _imp__KernelFree
.idata$5 0x000620ec 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000023.o)
0x000620ec _imp__MapIoMem
.idata$5 0x000620f0 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000024.o)
0x000620f0 _imp__MapPage
.idata$5 0x000620f4 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000025.o)
0x000620f4 _imp__MutexInit
.idata$5 0x000620f8 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000026.o)
0x000620f8 _imp__MutexLock
.idata$5 0x000620fc 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000027.o)
0x000620fc _imp__MutexUnlock
.idata$5 0x00062100 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000028.o)
0x00062100 _imp__PciApi
.idata$5 0x00062104 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000029.o)
0x00062104 _imp__PciRead16
.idata$5 0x00062108 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o)
0x00062108 _imp__PciRead32
.idata$5 0x0006210c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000031.o)
0x0006210c _imp__PciRead8
.idata$5 0x00062110 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000032.o)
0x00062110 _imp__PciWrite16
.idata$5 0x00062114 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000033.o)
0x00062114 _imp__PciWrite32
.idata$5 0x00062118 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000034.o)
0x00062118 _imp__PciWrite8
.idata$5 0x0006211c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000035.o)
0x0006211c _imp__RaiseEvent
.idata$5 0x00062120 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000036.o)
0x00062120 _imp__RegService
.idata$5 0x00062124 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000039.o)
0x00062124 _imp__SetScreen
.idata$5 0x00062128 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000040.o)
0x00062128 _imp__SysMsgBoardStr
.idata$5 0x0006212c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000041.o)
0x0006212c _imp__TimerHs
.idata$5 0x00062130 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000043.o)
0x00062130 _imp__WaitEvent
.idata$5 0x00062134 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000044.o)
SORT(*)(.idata$6)
.idata$6 0x00062138 0x14 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000001.o)
.idata$6 0x0006214c 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000002.o)
.idata$6 0x00062158 0x14 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000004.o)
.idata$6 0x0006216c 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000005.o)
.idata$6 0x0006217c 0x8 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000008.o)
.idata$6 0x00062184 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000009.o)
.idata$6 0x00062194 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000010.o)
.idata$6 0x000621a4 0x14 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000011.o)
.idata$6 0x000621b8 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000012.o)
.idata$6 0x000621c4 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000013.o)
.idata$6 0x000621d4 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000015.o)
.idata$6 0x000621e0 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000017.o)
.idata$6 0x000621f0 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000019.o)
.idata$6 0x00062200 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000021.o)
.idata$6 0x00062210 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000022.o)
.idata$6 0x00062220 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000023.o)
.idata$6 0x0006222c 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000024.o)
.idata$6 0x00062238 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000025.o)
.idata$6 0x00062244 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000026.o)
.idata$6 0x00062250 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000027.o)
.idata$6 0x00062260 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000028.o)
.idata$6 0x0006226c 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000029.o)
.idata$6 0x00062278 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o)
.idata$6 0x00062284 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000031.o)
.idata$6 0x00062290 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000032.o)
.idata$6 0x000622a0 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000033.o)
.idata$6 0x000622b0 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000034.o)
.idata$6 0x000622bc 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000035.o)
.idata$6 0x000622cc 0x10 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000036.o)
.idata$6 0x000622dc 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000039.o)
.idata$6 0x000622e8 0x14 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000040.o)
.idata$6 0x000622fc 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000041.o)
.idata$6 0x00062308 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000043.o)
SORT(*)(.idata$7)
.idata$7 0x00062314 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000001.o)
.idata$7 0x00062318 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000002.o)
.idata$7 0x0006231c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000004.o)
.idata$7 0x00062320 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000005.o)
.idata$7 0x00062324 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000008.o)
.idata$7 0x00062328 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000009.o)
.idata$7 0x0006232c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000010.o)
.idata$7 0x00062330 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000011.o)
.idata$7 0x00062334 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000012.o)
.idata$7 0x00062338 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000013.o)
.idata$7 0x0006233c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000015.o)
.idata$7 0x00062340 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000017.o)
.idata$7 0x00062344 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000019.o)
.idata$7 0x00062348 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000021.o)
.idata$7 0x0006234c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000022.o)
.idata$7 0x00062350 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000023.o)
.idata$7 0x00062354 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000024.o)
.idata$7 0x00062358 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000025.o)
.idata$7 0x0006235c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000026.o)
.idata$7 0x00062360 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000027.o)
.idata$7 0x00062364 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000028.o)
.idata$7 0x00062368 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000029.o)
.idata$7 0x0006236c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000030.o)
.idata$7 0x00062370 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000031.o)
.idata$7 0x00062374 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000032.o)
.idata$7 0x00062378 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000033.o)
.idata$7 0x0006237c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000034.o)
.idata$7 0x00062380 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000035.o)
.idata$7 0x00062384 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000036.o)
.idata$7 0x00062388 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000039.o)
.idata$7 0x0006238c 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000040.o)
.idata$7 0x00062390 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000041.o)
.idata$7 0x00062394 0x4 e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000043.o)
.idata$7 0x00062398 0xc e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a(d000044.o)
0x00062398 core_dll_iname
 
.reloc 0x00063000 0x2000
*(.reloc)
.reloc 0x00063000 0x1f4c dll stuff
LOAD main.o
LOAD pci.o
LOAD dvo_ch7017.o
LOAD dvo_ch7xxx.o
LOAD dvo_ivch.o
LOAD dvo_ns2501.o
LOAD dvo_sil164.o
LOAD dvo_tfp410.o
LOAD i915_dma.o
LOAD i915_drv.o
LOAD i915_gem.o
LOAD i915_gem_context.o
LOAD i915_gem_gtt.o
LOAD i915_gem_stolen.o
LOAD i915_gem_tiling.o
LOAD i915_irq.o
LOAD intel_bios.o
LOAD intel_crt.o
LOAD intel_ddi.o
LOAD intel_display.o
LOAD intel_dp.o
LOAD intel_dvo.o
LOAD intel_fb.o
LOAD intel_hdmi.o
LOAD intel_i2c.o
LOAD intel_lvds.o
LOAD intel_modes.o
LOAD intel_opregion.o
LOAD intel_panel.o
LOAD intel_pm.o
LOAD intel_ringbuffer.o
LOAD intel_sdvo.o
LOAD intel_sprite.o
LOAD kms_display.o
LOAD Gtt/intel-agp.o
LOAD Gtt/intel-gtt.o
LOAD e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-core.o
LOAD e:/kos/kolibri/drivers/video/drm/i915/../i2c/i2c-algo-bit.o
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_pci.o
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_modes.o
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_stub.o
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc.o
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_crtc_helper.o
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_edid.o
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_irq.o
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_dp_i2c_helper.o
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_mm.o
LOAD e:/kos/kolibri/drivers/video/drm/i915/../drm_fb_helper.o
LOAD e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libddk.a
LOAD e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libcore.a
LOAD e:/kos/kolibri/drivers/video/drm/i915/../../../ddk/libgcc.a
OUTPUT(i915.dll pei-i386)
LOAD dll stuff
/drivers/video/drm/i915/i915_dma.c
26,16 → 26,15
*
*/
 
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include "i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/intel-gtt.h>
#include "i915_trace.h"
//#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
//#include <linux/vgaarb.h>
//#include <linux/acpi.h>
46,15 → 45,55
 
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen);
 
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
u32 *val)
 
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
 
#define BEGIN_LP_RING(n) \
intel_ring_begin(LP_RING(dev_priv), (n))
 
#define OUT_RING(x) \
intel_ring_emit(LP_RING(dev_priv), x)
 
#define ADVANCE_LP_RING() \
intel_ring_advance(LP_RING(dev_priv))
 
/**
* Lock test for when it's just for synchronization of ring access.
*
* In that case, we don't need to do it when GEM is initialized as nobody else
* has access to the ring.
*/
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
if (LP_RING(dev->dev_private)->obj == NULL) \
LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
 
static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
*val = PciRead32(dev->busnr, dev->devfn, where);
return 1;
if (I915_NEED_GFX_HWS(dev_priv->dev))
return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
else
return intel_read_status_page(LP_RING(dev_priv), reg);
}
 
#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX 0x21
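/*
 * Editorial note, not in the original source: the "breadcrumb" is a
 * monotonically increasing counter that the driver stores into dword
 * 0x21 of the hardware status page (via MI_STORE_DWORD_INDEX, see
 * i915_emit_breadcrumb() below). READ_BREADCRUMB() reads that dword
 * back through the CPU mapping of the status page, so comparing it
 * with the last emitted sequence number tells us how far the ring has
 * actually executed.
 */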
 
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
 
if (dev->primary->master) {
master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_dispatch =
READ_BREADCRUMB(dev_priv);
}
}
 
static void i915_write_hws_pga(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
76,7 → 115,7
 
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
(void*)drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
 
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
83,306 → 122,1000
return -ENOMEM;
}
 
memset((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
0, PAGE_SIZE);
 
i915_write_hws_pga(dev);
 
dbgprintf("Enabled hardware status page\n");
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
 
/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
*/
static void i915_free_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
}
 
if (ring->status_page.gfx_addr) {
ring->status_page.gfx_addr = 0;
iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
}
 
/* Need to rewrite hardware status page */
I915_WRITE(HWS_PGA, 0x1ffff000);
}
 
#if 0
 
void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
/*
* We should never lose context on the ring with modesetting
* as we don't expose it to userspace
*/
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
 
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
 
if (!dev->primary->master)
return;
 
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)
master_priv = dev->primary->master->driver_priv;
if (ring->head == ring->tail && master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
 
#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
static int i915_dma_cleanup(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
 
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (dev->irq_enabled)
drm_irq_uninstall(dev);
 
mutex_lock(&dev->struct_mutex);
for (i = 0; i < I915_NUM_RINGS; i++)
intel_cleanup_ring_buffer(&dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
 
/* Clear the HWS virtual address at teardown */
if (I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
 
/* Set up MCHBAR if possible; dev_priv->mchbar_need_disable records whether we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
return 0;
}
 
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret;
 
dev_priv->mchbar_need_disable = false;
 
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
master_priv->sarea = drm_getsarea(dev);
if (master_priv->sarea) {
master_priv->sarea_priv = (drm_i915_sarea_t *)
((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
enabled = temp & 1;
DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
}
 
/* If it's already enabled, don't have to do anything */
if (enabled)
return;
if (init->ring_size != 0) {
if (LP_RING(dev_priv)->obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
 
dbgprintf("Epic fail\n");
ret = intel_render_ring_init_dri(dev,
init->ring_start,
init->ring_size);
if (ret) {
i915_dma_cleanup(dev);
return ret;
}
}
 
#if 0
if (intel_alloc_mchbar_resource(dev))
return;
dev_priv->dri1.cpp = init->cpp;
dev_priv->dri1.back_offset = init->back_offset;
dev_priv->dri1.front_offset = init->front_offset;
dev_priv->dri1.current_page = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->pf_current_page = 0;
 
dev_priv->mchbar_need_disable = true;
/* Allow hardware batchbuffers unless told otherwise.
*/
dev_priv->dri1.allow_batchbuffer = 1;
 
/* Space is allocated or reserved, so enable it. */
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
temp | DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
return 0;
}
#endif
 
static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
DRM_DEBUG_DRIVER("%s\n", __func__);
 
if (ring->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
 
/* Program Hardware Status Page */
if (!ring->status_page.page_addr) {
DRM_ERROR("Can not find hardware status page\n");
return -EINVAL;
}
DRM_DEBUG_DRIVER("hw status page @ %p\n",
ring->status_page.page_addr);
if (ring->status_page.gfx_addr != 0)
intel_ring_setup_status_page(ring);
else
i915_write_hws_pga(dev);
 
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
 
return 0;
}
 
static int i915_dma_init(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_init_t *init = data;
int retcode = 0;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
switch (init->func) {
case I915_INIT_DMA:
retcode = i915_initialize(dev, init);
break;
case I915_CLEANUP_DMA:
retcode = i915_dma_cleanup(dev);
break;
case I915_RESUME_DMA:
retcode = i915_dma_resume(dev);
break;
default:
retcode = -EINVAL;
break;
}
 
return retcode;
}
 
/* Implement basically the same security restrictions as hardware does
* for MI_BATCH_NON_SECURE. These can be made stricter at any time.
*
* Most of the calculations below involve calculating the size of a
* particular instruction. It's important to get the size right as
* that tells us where the next instruction to check is. Any illegal
* instruction detected will be given a size of zero, which is a
* signal to abort the rest of the buffer.
*/
static int validate_cmd(int cmd)
{
switch (((cmd >> 29) & 0x7)) {
case 0x0:
switch ((cmd >> 23) & 0x3f) {
case 0x0:
return 1; /* MI_NOOP */
case 0x4:
return 1; /* MI_FLUSH */
default:
return 0; /* disallow everything else */
}
break;
case 0x1:
return 0; /* reserved */
case 0x2:
return (cmd & 0xff) + 2; /* 2d commands */
case 0x3:
if (((cmd >> 24) & 0x1f) <= 0x18)
return 1;
 
switch ((cmd >> 24) & 0x1f) {
case 0x1c:
return 1;
case 0x1d:
switch ((cmd >> 16) & 0xff) {
case 0x3:
return (cmd & 0x1f) + 2;
case 0x4:
return (cmd & 0xf) + 2;
default:
return (cmd & 0xffff) + 2;
}
case 0x1e:
if (cmd & (1 << 23))
return (cmd & 0xffff) + 1;
else
return 1;
case 0x1f:
if ((cmd & (1 << 23)) == 0) /* inline vertices */
return (cmd & 0x1ffff) + 2;
else if (cmd & (1 << 17)) /* indirect random */
if ((cmd & 0xffff) == 0)
return 0; /* unknown length, too hard */
else
return (((cmd & 0xffff) + 1) / 2) + 1;
else
return 2; /* indirect sequential */
default:
return 0;
}
default:
return 0;
}
 
return 0;
}
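/*
 * Worked example (editorial, values are illustrative): for the 2D opcode
 * word 0x54000006, bits 31:29 are 0x2, so validate_cmd() returns
 * (cmd & 0xff) + 2 = 8 and the scanner in i915_emit_cmds() advances by
 * eight dwords. MI_NOOP (0x00000000) returns 1, and any reserved opcode
 * returns 0, which aborts the whole buffer with -EINVAL.
 */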
 
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i, ret;
 
if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
return -EINVAL;
 
for (i = 0; i < dwords;) {
int sz = validate_cmd(buffer[i]);
if (sz == 0 || i + sz > dwords)
return -EINVAL;
i += sz;
}
 
ret = BEGIN_LP_RING((dwords+1)&~1);
if (ret)
return ret;
 
#define LFB_SIZE 0xC00000
for (i = 0; i < dwords; i++)
OUT_RING(buffer[i]);
if (dwords & 1)
OUT_RING(0);
 
static int i915_load_gem_init(struct drm_device *dev)
ADVANCE_LP_RING();
 
return 0;
}
 
int
i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long prealloc_size, gtt_size, mappable_size;
int ret;
 
prealloc_size = dev_priv->mm.gtt->stolen_size;
gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
box->y2 <= 0 || box->x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
box->x1, box->y1, box->x2, box->y2);
return -EINVAL;
}
 
dbgprintf("%s prealloc: %x gtt: %x mappable: %x\n",__FUNCTION__,
prealloc_size, gtt_size, mappable_size);
if (INTEL_INFO(dev)->gen >= 4) {
ret = BEGIN_LP_RING(4);
if (ret)
return ret;
 
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
} else {
ret = BEGIN_LP_RING(6);
if (ret)
return ret;
 
/* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch page.
* There are a number of places where the hardware apparently
* prefetches past the end of the object, and we've seen multiple
* hangs with the GPU head pointer stuck in a batchbuffer bound
* at the last page of the aperture. One page should be enough to
* keep any prefetching inside of the aperture.
*/
OUT_RING(GFX_OP_DRAWRECT_INFO);
OUT_RING(DR1);
OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
OUT_RING(0);
}
ADVANCE_LP_RING();
 
return 0;
}
 
/* XXX: Emitting the counter should really be moved to part of the IRQ
* emit. For now, do it in both places:
*/
i915_gem_do_init(dev, LFB_SIZE, mappable_size, gtt_size - PAGE_SIZE - LFB_SIZE);
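/*
 * Editorial note: with LFB_SIZE = 0xC00000 the first 12 MiB of the
 * aperture stay reserved for the KolibriOS linear framebuffer, and one
 * trailing page is subtracted so the scratch page can absorb the
 * hardware prefetch described above; GEM manages only what is left.
 */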
 
mutex_lock(&dev->struct_mutex);
ret = i915_gem_init_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
static void i915_emit_breadcrumb(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->counter = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
}
 
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
drm_i915_cmdbuffer_t *cmd,
struct drm_clip_rect *cliprects,
void *cmdbuf)
{
int nbox = cmd->num_cliprects;
int i = 0, count, ret;
 
if (cmd->sz & 0x3) {
DRM_ERROR("alignment");
return -EINVAL;
}
 
i915_kernel_lost_context(dev);
 
count = nbox ? nbox : 1;
 
for (i = 0; i < count; i++) {
if (i < nbox) {
ret = i915_emit_box(dev, &cliprects[i],
cmd->DR1, cmd->DR4);
if (ret)
return ret;
}
 
/* Try to set up FBC with a reasonable compressed buffer size */
// if (I915_HAS_FBC(dev) && i915_powersave) {
// int cfb_size;
ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
if (ret)
return ret;
}
 
/* Leave 1M for line length buffer & misc. */
i915_emit_breadcrumb(dev);
return 0;
}
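/*
 * Editorial note: count = nbox ? nbox : 1 above guarantees the command
 * buffer is emitted exactly once even when userspace supplies no
 * cliprects; with N cliprects the same buffer is replayed N times, once
 * per clipped drawing rectangle set up by i915_emit_box().
 */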
 
/* Try to get a 32M buffer... */
// if (prealloc_size > (36*1024*1024))
// cfb_size = 32*1024*1024;
// else /* fall back to 7/8 of the stolen space */
// cfb_size = prealloc_size * 7 / 8;
// i915_setup_compression(dev, cfb_size);
// }
static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch,
struct drm_clip_rect *cliprects)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int nbox = batch->num_cliprects;
int i, count, ret;
 
/* Allow hardware batchbuffers unless told otherwise. */
dev_priv->allow_batchbuffer = 1;
if ((batch->start | batch->used) & 0x7) {
DRM_ERROR("alignment");
return -EINVAL;
}
 
i915_kernel_lost_context(dev);
 
count = nbox ? nbox : 1;
for (i = 0; i < count; i++) {
if (i < nbox) {
ret = i915_emit_box(dev, &cliprects[i],
batch->DR1, batch->DR4);
if (ret)
return ret;
}
 
if (!IS_I830(dev) && !IS_845G(dev)) {
ret = BEGIN_LP_RING(2);
if (ret)
return ret;
 
if (INTEL_INFO(dev)->gen >= 4) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
OUT_RING(batch->start);
} else {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
}
} else {
ret = BEGIN_LP_RING(4);
if (ret)
return ret;
 
OUT_RING(MI_BATCH_BUFFER);
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
OUT_RING(batch->start + batch->used - 4);
OUT_RING(0);
}
ADVANCE_LP_RING();
}
 
 
if (IS_G4X(dev) || IS_GEN5(dev)) {
if (BEGIN_LP_RING(2) == 0) {
OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
}
}
 
i915_emit_breadcrumb(dev);
return 0;
}
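/*
 * Summary (editorial): gen4+ carries the non-secure flag in the
 * MI_BATCH_BUFFER_START command dword itself (MI_BATCH_NON_SECURE_I965),
 * gen2/3 parts other than 830/845G OR MI_BATCH_NON_SECURE into the batch
 * address dword, and 830/845G fall back to the older MI_BATCH_BUFFER
 * form that takes explicit start and end addresses.
 */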
 
static int i915_load_modeset_init(struct drm_device *dev)
static int i915_dispatch_flip(struct drm_device * dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
int ret;
 
ret = intel_parse_bios(dev);
if (!master_priv->sarea_priv)
return -EINVAL;
 
DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
__func__,
dev_priv->dri1.current_page,
master_priv->sarea_priv->pf_current_page);
 
i915_kernel_lost_context(dev);
 
ret = BEGIN_LP_RING(10);
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
return ret;
 
// intel_register_dsm_handler();
OUT_RING(MI_FLUSH | MI_READ_FLUSH);
OUT_RING(0);
 
/* IIR "flip pending" bit means done if this bit is set */
if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
dev_priv->flip_pending_is_done = true;
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
OUT_RING(0);
if (dev_priv->dri1.current_page == 0) {
OUT_RING(dev_priv->dri1.back_offset);
dev_priv->dri1.current_page = 1;
} else {
OUT_RING(dev_priv->dri1.front_offset);
dev_priv->dri1.current_page = 0;
}
OUT_RING(0);
 
intel_modeset_init(dev);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
OUT_RING(0);
 
ret = i915_load_gem_init(dev);
if (ret)
goto cleanup_vga_switcheroo;
ADVANCE_LP_RING();
 
intel_modeset_gem_init(dev);
master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
 
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
 
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
return 0;
}
 
ret = intel_fbdev_init(dev);
if (ret)
goto cleanup_irq;
static int i915_quiescent(struct drm_device *dev)
{
struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
 
// drm_kms_helper_poll_init(dev);
i915_kernel_lost_context(dev);
return intel_wait_ring_idle(ring);
}
 
/* We're off and running w/KMS */
dev_priv->mm.suspended = 0;
static int i915_flush_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
mutex_lock(&dev->struct_mutex);
ret = i915_quiescent(dev);
mutex_unlock(&dev->struct_mutex);
 
return ret;
}
 
static int i915_batchbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
master_priv->sarea_priv;
drm_i915_batchbuffer_t *batch = data;
int ret;
struct drm_clip_rect *cliprects = NULL;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
if (!dev_priv->dri1.allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
return -EINVAL;
}
 
DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
batch->start, batch->used, batch->num_cliprects);
 
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
if (batch->num_cliprects < 0)
return -EINVAL;
 
if (batch->num_cliprects) {
cliprects = kcalloc(batch->num_cliprects,
sizeof(struct drm_clip_rect),
GFP_KERNEL);
if (cliprects == NULL)
return -ENOMEM;
 
ret = copy_from_user(cliprects, batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect));
if (ret != 0) {
ret = -EFAULT;
goto fail_free;
}
}
 
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
mutex_unlock(&dev->struct_mutex);
 
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
fail_free:
kfree(cliprects);
 
return ret;
}
 
static int i915_cmdbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
master_priv->sarea_priv;
drm_i915_cmdbuffer_t *cmdbuf = data;
struct drm_clip_rect *cliprects = NULL;
void *batch_data;
int ret;
 
DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
if (cmdbuf->num_cliprects < 0)
return -EINVAL;
 
batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
if (batch_data == NULL)
return -ENOMEM;
 
ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
if (ret != 0) {
ret = -EFAULT;
goto fail_batch_free;
}
 
if (cmdbuf->num_cliprects) {
cliprects = kcalloc(cmdbuf->num_cliprects,
sizeof(struct drm_clip_rect), GFP_KERNEL);
if (cliprects == NULL) {
ret = -ENOMEM;
goto fail_batch_free;
}
 
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
sizeof(struct drm_clip_rect));
if (ret != 0) {
ret = -EFAULT;
goto fail_clip_free;
}
}
 
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
goto fail_clip_free;
}
 
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
fail_clip_free:
kfree(cliprects);
fail_batch_free:
kfree(batch_data);
 
return ret;
}
 
static int i915_emit_irq(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
i915_kernel_lost_context(dev);
 
DRM_DEBUG_DRIVER("\n");
 
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->counter = 1;
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
}
 
return dev_priv->counter;
}
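/*
 * Editorial note: the counter wraps from 0x7FFFFFFF back to 1 so the
 * sequence number always stays positive as a plain int; i915_wait_irq()
 * below polls READ_BREADCRUMB(dev_priv) >= irq_nr to decide whether the
 * breadcrumb for a given sequence number has landed.
 */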
 
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
 
if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
return 0;
}
 
cleanup_irq:
// drm_irq_uninstall(dev);
cleanup_gem:
// mutex_lock(&dev->struct_mutex);
// i915_gem_cleanup_ringbuffer(dev);
// mutex_unlock(&dev->struct_mutex);
cleanup_vga_switcheroo:
// vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
// vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
if (ring->irq_get(ring)) {
DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
READ_BREADCRUMB(dev_priv) >= irq_nr);
ring->irq_put(ring);
} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
ret = -EBUSY;
 
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
}
 
return ret;
}
 
/* Needs the lock as it touches the ring.
*/
static int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_irq_emit_t *emit = data;
int result;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
static void i915_pineview_get_mem_freq(struct drm_device *dev)
if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
 
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
mutex_lock(&dev->struct_mutex);
result = i915_emit_irq(dev);
mutex_unlock(&dev->struct_mutex);
 
if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
return -EFAULT;
}
 
return 0;
}
 
/* Doesn't need the hardware lock.
*/
static int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 tmp;
drm_i915_irq_wait_t *irqwait = data;
 
tmp = I915_READ(CLKCFG);
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
switch (tmp & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_533:
dev_priv->fsb_freq = 533; /* 133*4 */
break;
case CLKCFG_FSB_800:
dev_priv->fsb_freq = 800; /* 200*4 */
break;
case CLKCFG_FSB_667:
dev_priv->fsb_freq = 667; /* 167*4 */
break;
case CLKCFG_FSB_400:
dev_priv->fsb_freq = 400; /* 100*4 */
break;
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
 
switch (tmp & CLKCFG_MEM_MASK) {
case CLKCFG_MEM_533:
dev_priv->mem_freq = 533;
break;
case CLKCFG_MEM_667:
dev_priv->mem_freq = 667;
break;
case CLKCFG_MEM_800:
dev_priv->mem_freq = 800;
break;
return i915_wait_irq(dev, irqwait->irq_seq);
}
 
/* detect pineview DDR3 setting */
tmp = I915_READ(CSHRDDR3CTL);
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_vblank_pipe_t *pipe = data;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
 
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
return 0;
}
 
/**
* Schedule buffer swap at given vertical blank.
*/
static int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
/* The delayed swap mechanism was fundamentally racy, and has been
* removed. The model was that the client requested a delayed flip/swap
* from the kernel, then waited for vblank before continuing to perform
* rendering. The problem was that the kernel might wake the client
* up before it dispatched the vblank swap (since the lock has to be
* held while touching the ringbuffer), in which case the client would
* clear and start the next frame before the swap occurred, and
* flicker would occur in addition to likely missing the vblank.
*
* In the absence of this ioctl, userland falls back to a correct path
* of waiting for a vblank, then dispatching the swap on its own.
* Context switching to userland and back is plenty fast enough for
* meeting the requirements of vblank swapping.
*/
return -EINVAL;
}
 
static int i915_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
DRM_DEBUG_DRIVER("%s\n", __func__);
 
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_flip(dev);
mutex_unlock(&dev->struct_mutex);
 
return ret;
}
 
static int i915_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u16 ddrpll, csipll;
drm_i915_getparam_t *param = data;
int value;
 
ddrpll = I915_READ16(DDRMPLL1);
csipll = I915_READ16(CSIPLL0);
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
 
switch (ddrpll & 0xff) {
case 0xc:
dev_priv->mem_freq = 800;
switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
value = dev->pdev->irq ? 1 : 0;
break;
case 0x10:
dev_priv->mem_freq = 1066;
case I915_PARAM_ALLOW_BATCHBUFFER:
value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
break;
case 0x14:
dev_priv->mem_freq = 1333;
case I915_PARAM_LAST_DISPATCH:
value = READ_BREADCRUMB(dev_priv);
break;
case 0x18:
dev_priv->mem_freq = 1600;
case I915_PARAM_CHIPSET_ID:
value = dev->pci_device;
break;
default:
DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
ddrpll & 0xff);
dev_priv->mem_freq = 0;
case I915_PARAM_HAS_GEM:
value = 1;
break;
}
 
dev_priv->r_t = dev_priv->mem_freq;
 
switch (csipll & 0x3ff) {
case 0x00c:
dev_priv->fsb_freq = 3200;
case I915_PARAM_NUM_FENCES_AVAIL:
value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
break;
case 0x00e:
dev_priv->fsb_freq = 3733;
case I915_PARAM_HAS_OVERLAY:
value = dev_priv->overlay ? 1 : 0;
break;
case 0x010:
dev_priv->fsb_freq = 4266;
case I915_PARAM_HAS_PAGEFLIPPING:
value = 1;
break;
case 0x012:
dev_priv->fsb_freq = 4800;
case I915_PARAM_HAS_EXECBUF2:
/* depends on GEM */
value = 1;
break;
case 0x014:
dev_priv->fsb_freq = 5333;
case I915_PARAM_HAS_BSD:
value = intel_ring_initialized(&dev_priv->ring[VCS]);
break;
case 0x016:
dev_priv->fsb_freq = 5866;
case I915_PARAM_HAS_BLT:
value = intel_ring_initialized(&dev_priv->ring[BCS]);
break;
case 0x018:
dev_priv->fsb_freq = 6400;
case I915_PARAM_HAS_RELAXED_FENCING:
value = 1;
break;
case I915_PARAM_HAS_COHERENT_RINGS:
value = 1;
break;
case I915_PARAM_HAS_EXEC_CONSTANTS:
value = INTEL_INFO(dev)->gen >= 4;
break;
case I915_PARAM_HAS_RELAXED_DELTA:
value = 1;
break;
case I915_PARAM_HAS_GEN7_SOL_RESET:
value = 1;
break;
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
break;
case I915_PARAM_HAS_WAIT_TIMEOUT:
value = 1;
break;
case I915_PARAM_HAS_SEMAPHORES:
value = i915_semaphore_is_enabled(dev);
break;
case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1;
break;
default:
DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
csipll & 0x3ff);
dev_priv->fsb_freq = 0;
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
return -EINVAL;
}
 
if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
DRM_ERROR("DRM_COPY_TO_USER failed\n");
return -EFAULT;
}
 
return 0;
}
 
static int i915_setparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_setparam_t *param = data;
 
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
 
switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
break;
case I915_SETPARAM_ALLOW_BATCHBUFFER:
dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
break;
case I915_SETPARAM_NUM_USED_FENCES:
if (param->value > dev_priv->num_fence_regs ||
param->value < 0)
return -EINVAL;
/* Userspace can use first N regs */
dev_priv->fence_reg_start = param->value;
break;
default:
DRM_DEBUG_DRIVER("unknown parameter %d\n",
param->param);
return -EINVAL;
}
 
if (dev_priv->fsb_freq == 3200) {
dev_priv->c_m = 0;
} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
dev_priv->c_m = 1;
} else {
dev_priv->c_m = 2;
return 0;
}
#endif
 
 
static int i915_set_status_page(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
 
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
WARN(1, "tried to set status page when mode setting active\n");
return 0;
}
 
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
 
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
dev_priv->dri1.gfx_hws_cpu_addr =
ioremap(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
ring->status_page.gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
 
memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
 
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws at %p\n",
ring->status_page.page_addr);
return 0;
}
 
static int i915_get_bridge_dev(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
395,20 → 1128,157
return 0;
}
 
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)
 
/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
* Lock protecting IPS related data structures
* - i915_mch_dev
* - dev_priv->max_delay
* - dev_priv->min_delay
* - dev_priv->fmax
* - dev_priv->gpu_busy
*/
#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
 
 
 
 
/* Set up MCHBAR if possible; dev_priv->mchbar_need_disable records whether we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
 
dev_priv->mchbar_need_disable = false;
 
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
enabled = temp & 1;
}
 
/* If it's already enabled, don't have to do anything */
if (enabled)
return;
 
dbgprintf("Epic fail\n");
 
#if 0
if (intel_alloc_mchbar_resource(dev))
return;
 
dev_priv->mchbar_need_disable = true;
 
/* Space is allocated or reserved, so enable it. */
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
temp | DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
}
#endif
}
 
 
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
struct drm_device *dev = cookie;
 
intel_modeset_vga_set_state(dev, state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
else
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
 
 
 
 
 
 
static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ret = intel_parse_bios(dev);
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
 
// intel_register_dsm_handler();
 
/* Initialise stolen first so that we may reserve preallocated
* objects for the BIOS to KMS transition.
*/
static DEFINE_SPINLOCK(mchdev_lock);
ret = i915_gem_init_stolen(dev);
if (ret)
goto cleanup_vga_switcheroo;
 
intel_modeset_init(dev);
 
ret = i915_gem_init(dev);
if (ret)
goto cleanup_gem_stolen;
 
intel_modeset_gem_init(dev);
 
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
 
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
 
ret = intel_fbdev_init(dev);
if (ret)
goto cleanup_irq;
 
// drm_kms_helper_poll_init(dev);
 
/* We're off and running w/KMS */
dev_priv->mm.suspended = 0;
 
return 0;
 
cleanup_irq:
// drm_irq_uninstall(dev);
cleanup_gem:
// mutex_lock(&dev->struct_mutex);
// i915_gem_cleanup_ringbuffer(dev);
// mutex_unlock(&dev->struct_mutex);
// i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_gem_stolen:
// i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
// vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
// vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
return ret;
}
 
 
 
 
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
const struct intel_device_info *info = dev_priv->info;
 
#define DEV_INFO_FLAG(name) info->name ? #name "," : ""
#define DEV_INFO_SEP ,
DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
"%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
info->gen,
dev_priv->dev->pdev->device,
DEV_INFO_FLAGS);
#undef DEV_INFO_FLAG
#undef DEV_INFO_SEP
}
 
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
423,11 → 1293,27
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
int ret = 0, mmio_bar;
uint32_t agp_size;
struct intel_device_info *info;
int ret = 0, mmio_bar, mmio_size;
uint32_t aperture_size;
 
ENTER();
 
info = (struct intel_device_info *) flags;
 
#if 0
/* Refuse to load on gen6+ without kms enabled. */
if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
dev->types[7] = _DRM_STAT_PRIMARY;
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
#endif
 
dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
434,13 → 1320,32
 
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
dev_priv->info = (struct intel_device_info *) flags;
dev_priv->info = info;
 
i915_dump_device_info(dev_priv);
 
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
}
 
ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
if (!ret) {
DRM_ERROR("failed to set up gmch\n");
ret = -EIO;
goto put_bridge;
}
 
dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
goto put_gmch;
}
 
 
pci_set_master(dev->pdev);
 
/* overlay on gen2 is broken and can't address above 1G */
// if (IS_GEN2(dev))
// dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
457,43 → 1362,41
// dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
 
mmio_bar = IS_GEN2(dev) ? 1 : 0;
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
/* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
* in the same BAR, so we want to restrict this ioremap so it doesn't
* clobber the GTT, which we want mapped with ioremap_wc instead.
* Fortunately,
* the register BAR remains the same size for all the earlier
* generations up to Ironlake.
*/
if (info->gen < 5)
mmio_size = 512*1024;
else
mmio_size = 2*1024*1024;
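/*
 * Editorial example (gen6 case): on Sandy Bridge the 4 MiB BAR 0 holds
 * 2 MiB of registers followed by 2 MiB of GTT entries, so capping the
 * ioremap at 2*1024*1024 keeps the register mapping clear of the GTT
 * half.
 */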
 
dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
goto put_bridge;
goto put_gmch;
}
 
dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
goto out_rmmap;
}
aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
 
// agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dbgprintf("gtt_base_addr %x aperture_size %d\n",
dev_priv->mm.gtt_base_addr, aperture_size );
 
/* agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; */
 
// dev_priv->mm.gtt_mapping =
// io_mapping_create_wc(dev->agp->base, agp_size);
// io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
// aperture_size);
// if (dev_priv->mm.gtt_mapping == NULL) {
// ret = -EIO;
// goto out_rmmap;
// }
 
/* Set up a WC MTRR for non-PAT systems. This is more common than
* one would think, because the kernel disables PAT on first
* generation Core chips because WC PAT gets overridden by a UC
* MTRR if present. Even if a UC MTRR isn't present.
*/
// dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
// agp_size,
// MTRR_TYPE_WRCOMB, 1);
// if (dev_priv->mm.gtt_mtrr < 0) {
// DRM_INFO("MTRR allocation failed. Graphics "
// "performance may suffer.\n");
// }
// i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
// aperture_size);
 
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
506,11 → 1409,9
*
* All tasks on the workqueue are expected to acquire the dev mutex
* so there is no point in running more than one instance of the
* workqueue at any time: max_active = 1 and NON_REENTRANT.
* workqueue at any time. Use an ordered one.
*/
dev_priv->wq = alloc_workqueue("i915",
WQ_UNBOUND | WQ_NON_REENTRANT,
1);
dev_priv->wq = alloc_ordered_workqueue("i915", 0);
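/*
 * Editorial note: alloc_ordered_workqueue("i915", 0) expands to an
 * unbound workqueue with max_active = 1, giving the same
 * single-instance guarantee the removed WQ_UNBOUND | WQ_NON_REENTRANT
 * variant relied on, plus strict FIFO ordering of queued work.
 */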
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
517,10 → 1418,11
goto out_mtrrfree;
}
 
/* enable GEM by default */
dev_priv->has_gem = 1;
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev);
 
intel_irq_init(dev);
intel_gt_init(dev);
 
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
539,11 → 1441,6
goto out_gem_unload;
}
 
if (IS_PINEVIEW(dev))
i915_pineview_get_mem_freq(dev);
else if (IS_GEN5(dev))
i915_ironlake_get_mem_freq(dev);
 
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
555,15 → 1452,13
* be lost or delayed, but we use them anyway to avoid
* stuck interrupts on some machines.
*/
// if (!IS_I945G(dev) && !IS_I945GM(dev))
// pci_enable_msi(dev->pdev);
 
spin_lock_init(&dev_priv->gt_lock);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
 
if (IS_IVYBRIDGE(dev))
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
dev_priv->num_pipe = 2;
577,8 → 1472,6
/* Start out suspended */
dev_priv->mm.suspended = 1;
 
intel_detect_pch(dev);
 
ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
592,12 → 1485,9
// setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
// (unsigned long) dev);
 
spin_lock(&mchdev_lock);
i915_mch_dev = dev_priv;
dev_priv->mchdev_lock = &mchdev_lock;
spin_unlock(&mchdev_lock);
 
// ips_ping_for_i915_load();
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
 
LEAVE();
 
615,14 → 1505,16
// destroy_workqueue(dev_priv->wq);
out_mtrrfree:
// if (dev_priv->mm.gtt_mtrr >= 0) {
// mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
// dev->agp->agp_info.aper_size * 1024 * 1024);
// mtrr_del(dev_priv->mm.gtt_mtrr,
// dev_priv->mm.gtt_base_addr,
// aperture_size);
// dev_priv->mm.gtt_mtrr = -1;
// }
// io_mapping_free(dev_priv->mm.gtt_mapping);
 
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
// intel_gmch_remove();
put_bridge:
// pci_dev_put(dev_priv->bridge_dev);
free_priv:
630,3 → 1522,226
return ret;
}
 
#if 0
 
int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
intel_gpu_ips_teardown();
 
i915_teardown_sysfs(dev);
 
if (dev_priv->mm.inactive_shrinker.shrink)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
mutex_lock(&dev->struct_mutex);
ret = i915_gpu_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
i915_gem_retire_requests(dev);
mutex_unlock(&dev->struct_mutex);
 
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
io_mapping_free(dev_priv->mm.gtt_mapping);
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr,
dev_priv->mm.gtt_base_addr,
dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
dev_priv->mm.gtt_mtrr = -1;
}
 
acpi_video_unregister();
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
 
/*
* free the memory space allocated for the child device
* config parsed from VBT
*/
if (dev_priv->child_dev && dev_priv->child_dev_num) {
kfree(dev_priv->child_dev);
dev_priv->child_dev = NULL;
dev_priv->child_dev_num = 0;
}
 
vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
 
/* Free error state after interrupts are fully disabled. */
del_timer_sync(&dev_priv->hangcheck_timer);
cancel_work_sync(&dev_priv->error_work);
i915_destroy_error_state(dev);
 
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
 
intel_opregion_fini(dev);
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
 
mutex_lock(&dev->struct_mutex);
i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
i915_gem_cleanup_stolen(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
 
intel_cleanup_overlay(dev);
 
if (!I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
}
 
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
 
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
 
destroy_workqueue(dev_priv->wq);
 
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
 
return 0;
}
 
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv;
 
DRM_DEBUG_DRIVER("\n");
file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
if (!file_priv)
return -ENOMEM;
 
file->driver_priv = file_priv;
 
spin_lock_init(&file_priv->mm.lock);
INIT_LIST_HEAD(&file_priv->mm.request_list);
 
idr_init(&file_priv->context_idr);
 
return 0;
}
 
/**
* i915_driver_lastclose - clean up after all DRM clients have exited
* @dev: DRM device
*
* Take care of cleaning up after all DRM clients have exited. In the
* mode setting case, we want to restore the kernel's initial mode (just
* in case the last client left us in a bad state).
*
* Additionally, in the non-mode setting case, we'll tear down the GTT
* and DMA structures, since the kernel won't be using them, and clean
* up any GEM state.
*/
void i915_driver_lastclose(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
 
/* On gen6+ we refuse to init without kms enabled, but then the drm core
* goes right around and calls lastclose. Check for this and don't clean
* up anything. */
if (!dev_priv)
return;
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fb_restore_mode(dev);
vga_switcheroo_process_delayed_switch();
return;
}
 
i915_gem_lastclose(dev);
 
i915_dma_cleanup(dev);
}
 
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
i915_gem_context_close(dev, file_priv);
i915_gem_release(dev, file_priv);
}
 
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
 
kfree(file_priv);
}
 
struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
};
 
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
 
/*
* This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp;
 * otherwise the drm core refuses to initialize the agp support code.
*/
int i915_driver_device_is_agp(struct drm_device * dev)
{
return 1;
}
#endif
/drivers/video/drm/i915/i915_drv.c
28,9 → 28,8
*/
 
//#include <linux/device.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
 
41,6 → 40,8
#include <errno-base.h>
#include <linux/pci.h>
 
#include <drm/drm_crtc_helper.h>
 
#include <syscall.h>
 
#define __read_mostly
49,20 → 50,79
 
struct drm_device *main_device;
 
static int i915_modeset __read_mostly = 1;
MODULE_PARM_DESC(modeset,
"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
"1=on, -1=force vga console preference [default])");
 
 
int i915_panel_ignore_lid __read_mostly = 0;
MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect [default], 1=lid open, "
"-1=lid closed)");
 
unsigned int i915_powersave __read_mostly = 0;
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
 
unsigned int i915_enable_rc6 __read_mostly = -1;
int i915_semaphores __read_mostly = -1;
 
unsigned int i915_enable_fbc __read_mostly = 0;
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 
int i915_enable_rc6 __read_mostly = 0;
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
"(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). "
"For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. "
"default: -1 (use per-chip default)");
 
int i915_enable_fbc __read_mostly = 0;
MODULE_PARM_DESC(i915_enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
 
unsigned int i915_lvds_downclock __read_mostly = 0;
MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
 
unsigned int i915_panel_use_ssc __read_mostly = 1;
int i915_lvds_channel_mode __read_mostly;
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
 
int i915_panel_use_ssc __read_mostly = -1;
MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
 
int i915_vbt_sdvo_panel_type __read_mostly = -1;
MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
 
static bool i915_try_reset __read_mostly = true;
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
 
bool i915_enable_hangcheck __read_mostly = false;
MODULE_PARM_DESC(enable_hangcheck,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
 
int i915_enable_ppgtt __read_mostly = false;
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
 
unsigned int i915_preliminary_hw_support __read_mostly = true;
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support. "
"Enable Haswell and ValleyView Support. "
"(default: false)");
 
 
#define PCI_VENDOR_ID_INTEL 0x8086
 
#define INTEL_VGA_DEVICE(id, info) { \
137,7 → 197,7
 
static const struct intel_device_info intel_ironlake_d_info = {
.gen = 5,
.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
};
 
153,6 → 213,8
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
.has_force_wake = 1,
};
 
static const struct intel_device_info intel_sandybridge_m_info = {
161,6 → 223,8
.has_fbc = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
.has_force_wake = 1,
};
 
static const struct intel_device_info intel_ivybridge_d_info = {
168,6 → 232,8
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
.has_force_wake = 1,
};
 
static const struct intel_device_info intel_ivybridge_m_info = {
176,8 → 242,46
.has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
.has_force_wake = 1,
};
 
static const struct intel_device_info intel_valleyview_m_info = {
.gen = 7, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 0,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
};
 
static const struct intel_device_info intel_valleyview_d_info = {
.gen = 7,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 0,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
};
 
static const struct intel_device_info intel_haswell_d_info = {
.is_haswell = 1, .gen = 7,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
.has_force_wake = 1,
};
 
static const struct intel_device_info intel_haswell_m_info = {
.is_haswell = 1, .gen = 7, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
.has_llc = 1,
.has_force_wake = 1,
};
 
static const struct pci_device_id pciidlist[] = { /* aka */
INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */
INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */
217,6 → 321,46
INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */
INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */
INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */
INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */
INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */
INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */
INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */
INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */
INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */
INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */
INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */
INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */
INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */
INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */
INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */
INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */
INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */
INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */
INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */
INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */
INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */
INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
{0, 0, 0}
};
 
224,6 → 368,7
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
 
void intel_detect_pch(struct drm_device *dev)
{
244,111 → 389,48
 
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
}
}
 
void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
int count;
if (INTEL_INFO(dev)->gen < 6)
return 0;
 
count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
udelay(10);
if (i915_semaphores >= 0)
return i915_semaphores;
 
I915_WRITE_NOTRACE(FORCEWAKE, 1);
POSTING_READ(FORCEWAKE);
#ifdef CONFIG_INTEL_IOMMU
/* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
return false;
#endif
 
count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
udelay(10);
return 1;
}
 
void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
int count;
 
count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
udelay(10);
 
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
POSTING_READ(FORCEWAKE_MT);
 
count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
udelay(10);
}
 
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
* be called at the beginning of the sequence followed by a call to
* gen6_gt_force_wake_put() at the end of the sequence.
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
if (dev_priv->forcewake_count++ == 0)
dev_priv->display.force_wake_get(dev_priv);
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
 
void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
 
void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
POSTING_READ(FORCEWAKE_MT);
}
 
/*
* see gen6_gt_force_wake_get()
*/
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
if (--dev_priv->forcewake_count == 0)
dev_priv->display.force_wake_put(dev_priv);
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
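/* Usage sketch (illustrative): hold the forcewake reference across a
 * register sequence that must not race with GT power-down, e.g.
 *
 *   gen6_gt_force_wake_get(dev_priv);
 *   ... several I915_READ()/I915_WRITE() accesses ...
 *   gen6_gt_force_wake_put(dev_priv);
 *
 * Single reads are already covered implicitly by the i915_read*
 * helpers, which take a temporary reference when needed.
 */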
 
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10);
fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
}
// WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES);
dev_priv->gt_fifo_count = fifo;
}
dev_priv->gt_fifo_count--;
}
 
 
 
 
 
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent);
 
int i915_init(void)
357,14 → 439,7
const struct pci_device_id *ent;
int err;
 
if( init_agp() != 0)
{
DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
return 0;
};
 
ent = find_pci_device(&device, pciidlist);
 
if( unlikely(ent == NULL) )
{
dbgprintf("device not found\n");
371,9 → 446,25
return 0;
};
 
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
 
    if ((intel_info->is_haswell || intel_info->is_valleyview) &&
        !i915_preliminary_hw_support) {
DRM_ERROR("Preliminary hardware support disabled\n");
return -ENODEV;
}
 
dbgprintf("device %x:%x\n", device.pci_dev.vendor,
device.pci_dev.device);
 
if (intel_info->gen != 3) {
 
} else if (init_agp() != 0) {
DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
return -ENODEV;
}
 
err = drm_get_dev(&device.pci_dev, ent);
 
return err;
394,7 → 485,7
// if (ret)
// goto err_g1;
 
// pci_set_master(pdev);
pci_set_master(pdev);
 
// if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
// printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
443,7 → 534,90
return ret;
}
 
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
((HAS_FORCE_WAKE((dev_priv)->dev)) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
 
static bool IS_DISPLAYREG(u32 reg)
{
/*
* This should make it easier to transition modules over to the
* new register block scheme, since we can do it incrementally.
*/
if (reg >= VLV_DISPLAY_BASE)
return false;
 
if (reg >= RENDER_RING_BASE &&
reg < RENDER_RING_BASE + 0xff)
return false;
if (reg >= GEN6_BSD_RING_BASE &&
reg < GEN6_BSD_RING_BASE + 0xff)
return false;
if (reg >= BLT_RING_BASE &&
reg < BLT_RING_BASE + 0xff)
return false;
 
if (reg == PGTBL_ER)
return false;
 
if (reg >= IPEIR_I965 &&
reg < HWSTAM)
return false;
 
if (reg == MI_MODE)
return false;
 
if (reg == GFX_MODE_GEN7)
return false;
 
if (reg == RENDER_HWS_PGA_GEN7 ||
reg == BSD_HWS_PGA_GEN7 ||
reg == BLT_HWS_PGA_GEN7)
return false;
 
if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
reg == GEN6_BSD_RNCID)
return false;
 
if (reg == GEN6_BLITTER_ECOSKPD)
return false;
 
if (reg >= 0x4000c &&
reg <= 0x4002c)
return false;
 
if (reg >= 0x4f000 &&
reg <= 0x4f08f)
return false;
 
if (reg >= 0x4f100 &&
reg <= 0x4f11f)
return false;
 
if (reg >= VLV_MASTER_IER &&
reg <= GEN6_PMIER)
return false;
 
if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
return false;
 
if (reg >= VLV_IIR_RW &&
reg <= VLV_ISR)
return false;
 
if (reg == FORCEWAKE_VLV ||
reg == FORCEWAKE_ACK_VLV)
return false;
 
if (reg == GEN6_GDRST)
return false;
 
return true;
}
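
/* Sketch of what the macro below generates (illustrative):
 * __i915_read(32, l) produces
 *
 *   u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg);
 *
 * a reader that takes a temporary forcewake reference when
 * NEEDS_FORCE_WAKE() says the register requires it, and that remaps
 * ValleyView display registers by +0x180000 via IS_DISPLAYREG().
 */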
 
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
451,11 → 625,13
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
if (dev_priv->forcewake_count == 0) \
dev_priv->display.force_wake_get(dev_priv); \
dev_priv->gt.force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
if (dev_priv->forcewake_count == 0) \
dev_priv->display.force_wake_put(dev_priv); \
dev_priv->gt.force_wake_put(dev_priv); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
val = read##y(dev_priv->regs + reg + 0x180000); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
/drivers/video/drm/i915/i915_drv.h
35,6 → 35,7
#include "intel_ringbuffer.h"
//#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
//#include <linux/backlight.h>
 
46,6 → 47,13
 
#define I915_TILING_NONE 0
 
#define VGA_RSRC_NONE 0x00
#define VGA_RSRC_LEGACY_IO 0x01
#define VGA_RSRC_LEGACY_MEM 0x02
#define VGA_RSRC_LEGACY_MASK (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
/* Non-legacy access */
#define VGA_RSRC_NORMAL_IO 0x04
#define VGA_RSRC_NORMAL_MEM 0x08
 
#define DRIVER_AUTHOR "Tungsten Graphics, Inc."
 
68,10 → 76,34
};
#define plane_name(p) ((p) + 'A')
 
enum port {
PORT_A = 0,
PORT_B,
PORT_C,
PORT_D,
PORT_E,
I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')
 
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
 
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
if ((intel_encoder)->base.crtc == (__crtc))
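
/* Usage sketch (illustrative): walk only the encoders currently
 * attached to one CRTC, e.g. to count them:
 *
 *   struct intel_encoder *intel_encoder;
 *   int count = 0;
 *
 *   for_each_encoder_on_crtc(dev, crtc, intel_encoder)
 *           count++;
 */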
 
struct intel_pch_pll {
int refcount; /* count of number of CRTCs sharing this PLL */
int active; /* count of number of active CRTCs (i.e. DPMS on) */
bool on; /* is the PLL actually active? Disabled during modeset */
int pll_reg;
int fp0_reg;
int fp1_reg;
};
#define I915_NUM_PLLS 2
 
/* Interface history:
*
* 1.1: Original.
88,6 → 120,7
 
#define WATCH_COHERENCY 0
#define WATCH_LISTS 0
#define WATCH_GTT 0
 
#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
109,11 → 142,11
struct drm_i915_private;
 
struct intel_opregion {
struct opregion_header *header;
struct opregion_acpi *acpi;
struct opregion_swsci *swsci;
struct opregion_asle *asle;
void *vbt;
struct opregion_header __iomem *header;
struct opregion_acpi __iomem *acpi;
struct opregion_swsci __iomem *swsci;
struct opregion_asle __iomem *asle;
void __iomem *vbt;
u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)
133,7 → 166,7
struct drm_i915_fence_reg {
struct list_head lru_list;
struct drm_i915_gem_object *obj;
uint32_t setup_seqno;
int pin_count;
};
 
struct sdvo_device_mapping {
150,38 → 183,50
struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
u32 ier;
u32 ccid;
bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 ipeir;
u32 ipehr;
u32 instdone;
u32 acthd;
u32 tail[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
u32 ipeir[I915_NUM_RINGS];
u32 ipehr[I915_NUM_RINGS];
u32 instdone[I915_NUM_RINGS];
u32 acthd[I915_NUM_RINGS];
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
/* our own tracking of ring head and tail */
u32 cpu_ring_head[I915_NUM_RINGS];
u32 cpu_ring_tail[I915_NUM_RINGS];
u32 error; /* gen6+ */
u32 bcs_acthd; /* gen6+ blt engine */
u32 bcs_ipehr;
u32 bcs_ipeir;
u32 bcs_instdone;
u32 bcs_seqno;
u32 vcs_acthd; /* gen6+ bsd engine */
u32 vcs_ipehr;
u32 vcs_ipeir;
u32 vcs_instdone;
u32 vcs_seqno;
u32 instpm;
u32 instps;
u32 instdone1;
u32 seqno;
u32 err_int; /* gen7 */
u32 instpm[I915_NUM_RINGS];
u32 instps[I915_NUM_RINGS];
u32 extra_instdone[I915_NUM_INSTDONE_REG];
u32 seqno[I915_NUM_RINGS];
u64 bbaddr;
u32 fault_reg[I915_NUM_RINGS];
u32 done_reg;
u32 faddr[I915_NUM_RINGS];
u64 fence[I915_MAX_NUM_FENCES];
struct timeval time;
struct drm_i915_error_ring {
struct drm_i915_error_object {
int page_count;
u32 gtt_offset;
u32 *pages[0];
} *ringbuffer[I915_NUM_RINGS], *batchbuffer[I915_NUM_RINGS];
} *ringbuffer, *batchbuffer;
struct drm_i915_error_request {
long jiffies;
u32 seqno;
u32 tail;
} *requests;
int num_requests;
} ring[I915_NUM_RINGS];
struct drm_i915_error_buffer {
u32 size;
u32 name;
u32 seqno;
u32 rseqno, wseqno;
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
190,7 → 235,7
u32 tiling:2;
u32 dirty:1;
u32 purgeable:1;
u32 ring:4;
s32 ring:4;
u32 cache_level:2;
} *active_bo, *pinned_bo;
u32 active_bo_count, pinned_bo_count;
199,7 → 244,6
};
 
struct drm_i915_display_funcs {
void (*dpms)(struct drm_crtc *crtc, int mode);
bool (*fbc_enabled)(struct drm_device *dev);
void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
void (*disable_fbc)(struct drm_device *dev);
208,11 → 252,16
void (*update_wm)(struct drm_device *dev);
void (*update_sprite_wm)(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb);
void (*crtc_enable)(struct drm_crtc *crtc);
void (*crtc_disable)(struct drm_crtc *crtc);
void (*off)(struct drm_crtc *crtc);
void (*write_eld)(struct drm_connector *connector,
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
223,8 → 272,6
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
232,6 → 279,37
/* pll clock increase/decrease */
};
 
struct drm_i915_gt_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
};
 
#define DEV_INFO_FLAGS \
DEV_INFO_FLAG(is_mobile) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i85x) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i915g) DEV_INFO_SEP \
DEV_INFO_FLAG(is_i945gm) DEV_INFO_SEP \
DEV_INFO_FLAG(is_g33) DEV_INFO_SEP \
DEV_INFO_FLAG(need_gfx_hws) DEV_INFO_SEP \
DEV_INFO_FLAG(is_g4x) DEV_INFO_SEP \
DEV_INFO_FLAG(is_pineview) DEV_INFO_SEP \
DEV_INFO_FLAG(is_broadwater) DEV_INFO_SEP \
DEV_INFO_FLAG(is_crestline) DEV_INFO_SEP \
DEV_INFO_FLAG(is_ivybridge) DEV_INFO_SEP \
DEV_INFO_FLAG(is_valleyview) DEV_INFO_SEP \
DEV_INFO_FLAG(is_haswell) DEV_INFO_SEP \
DEV_INFO_FLAG(has_force_wake) DEV_INFO_SEP \
DEV_INFO_FLAG(has_fbc) DEV_INFO_SEP \
DEV_INFO_FLAG(has_pipe_cxsr) DEV_INFO_SEP \
DEV_INFO_FLAG(has_hotplug) DEV_INFO_SEP \
DEV_INFO_FLAG(cursor_needs_physical) DEV_INFO_SEP \
DEV_INFO_FLAG(has_overlay) DEV_INFO_SEP \
DEV_INFO_FLAG(overlay_needs_physical) DEV_INFO_SEP \
DEV_INFO_FLAG(supports_tv) DEV_INFO_SEP \
DEV_INFO_FLAG(has_bsd_ring) DEV_INFO_SEP \
DEV_INFO_FLAG(has_blt_ring) DEV_INFO_SEP \
DEV_INFO_FLAG(has_llc)
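
/* Usage sketch (assumption, modeled on the Linux debugfs code): the
 * list expands into per-flag statements once the two hooks are
 * defined, e.g. to print every set capability bit:
 *
 *   #define DEV_INFO_FLAG(x) if (info->x) seq_printf(m, #x ",")
 *   #define DEV_INFO_SEP ;
 *   DEV_INFO_FLAGS;
 *   #undef DEV_INFO_FLAG
 *   #undef DEV_INFO_SEP
 */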
 
struct intel_device_info {
u8 gen;
u8 is_mobile:1;
245,6 → 323,9
u8 is_broadwater:1;
u8 is_crestline:1;
u8 is_ivybridge:1;
u8 is_valleyview:1;
u8 has_force_wake:1;
u8 is_haswell:1;
u8 has_fbc:1;
u8 has_pipe_cxsr:1;
u8 has_hotplug:1;
254,8 → 335,30
u8 supports_tv:1;
u8 has_bsd_ring:1;
u8 has_blt_ring:1;
u8 has_llc:1;
};
 
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
unsigned num_pd_entries;
dma_addr_t *pt_pages;
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
dma_addr_t scratch_page_dma_addr;
};
 
 
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
int id;
bool is_initialized;
struct drm_i915_file_private *file_priv;
struct intel_ring_buffer *ring;
struct drm_i915_gem_object *obj;
};
 
enum no_fbc_reason {
FBC_NO_OUTPUT, /* no outputs enabled to compress */
FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
268,25 → 371,38
};
 
enum intel_pch {
PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint PCH */
PCH_LPT, /* Lynxpoint PCH */
};
 
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
 
struct intel_fbdev;
struct intel_fbc_work;
 
struct intel_gmbus {
struct i2c_adapter adapter;
bool force_bit;
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
struct drm_i915_private *dev_priv;
};
 
typedef struct drm_i915_private {
struct drm_device *dev;
 
const struct intel_device_info *info;
 
int has_gem;
int relative_constants_mode;
 
void __iomem *regs;
 
struct drm_i915_gt_funcs gt;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
295,12 → 411,17
/** gt_lock is also taken in irq contexts. */
spinlock_t gt_lock;
 
struct intel_gmbus {
struct i2c_adapter adapter;
struct i2c_adapter *force_bit;
u32 reg0;
} *gmbus;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
 
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
 
/**
* Base address of the gmbus and gpio block.
*/
uint32_t gpio_mmio_base;
 
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
307,22 → 428,19
 
drm_dma_handle_t *status_page_dmah;
uint32_t counter;
drm_local_map_t hws_map;
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
 
// struct resource mch_res;
 
unsigned int cpp;
int back_offset;
int front_offset;
int current_page;
int page_flipping;
 
atomic_t irq_received;
 
/* protects the irq masks */
spinlock_t irq_lock;
 
/* DPIO indirect register protection */
spinlock_t dpio_lock;
 
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
332,23 → 450,18
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
 
int tex_lru_log_granularity;
int allow_batchbuffer;
struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
int num_pipe;
int num_pch_pll;
 
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
uint32_t last_acthd_bsd;
uint32_t last_acthd_blt;
uint32_t last_instdone;
uint32_t last_instdone1;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
 
unsigned int stop_rings;
 
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
375,6 → 488,8
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
398,9 → 513,10
unsigned int fsb_freq, mem_freq, is_ddr3;
 
spinlock_t error_lock;
// struct drm_i915_error_state *first_error;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
// struct completion error_completion;
struct completion error_completion;
struct workqueue_struct *wq;
 
/* Display functions */
573,7 → 689,13
struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head gtt_list;
struct list_head bound_list;
/**
* List of objects which are not bound to the GTT (thus
* are idle and not used by the GPU) but still have
	 * (presumably uncached) pages attached.
*/
struct list_head unbound_list;
 
/** Usable portion of the GTT for GEM */
unsigned long gtt_start;
581,8 → 703,14
unsigned long gtt_end;
 
// struct io_mapping *gtt_mapping;
phys_addr_t gtt_base_addr;
int gtt_mtrr;
 
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
 
u32 *l3_remap_info;
 
// struct shrinker inactive_shrinker;
 
/**
597,17 → 725,6
struct list_head active_list;
 
/**
* List of objects which are not in the ringbuffer but which
* still have a write_domain which needs to be flushed before
* unbinding.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is held on the buffer while on this list.
*/
struct list_head flushing_list;
 
/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
619,24 → 736,10
*/
struct list_head inactive_list;
 
/**
* LRU list of objects which are not in the ringbuffer but
* are still pinned in the GTT.
*/
struct list_head pinned_list;
 
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
 
/**
* List of objects currently pending being freed.
*
* These objects are no longer in use, but due to a signal
* we were prevented from freeing them at the appointed time.
*/
struct list_head deferred_free_list;
 
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
684,6 → 787,22
size_t object_memory;
u32 object_count;
} mm;
 
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
 
unsigned int cpp;
int back_offset;
int front_offset;
int current_page;
int page_flipping;
} dri1;
 
/* Kernel Modesetting */
 
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
unsigned int lvds_border_bits;
693,16 → 812,14
struct drm_crtc *plane_to_crtc_mapping[3];
struct drm_crtc *pipe_to_crtc_mapping[3];
wait_queue_head_t pending_flip_queue;
bool flip_pending_is_done;
 
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
 
/* Reclocking support */
bool render_reclock_avail;
bool lvds_downclock_avail;
/* indicates the reduced downclock for LVDS*/
int lvds_downclock;
struct work_struct idle_work;
struct timer_list idle_timer;
bool busy;
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
711,13 → 828,27
 
bool mchbar_need_disable;
 
struct work_struct rps_work;
spinlock_t rps_lock;
/* gen6+ rps state */
struct {
struct work_struct work;
u32 pm_iir;
		/* lock - irqsave spinlock that protects the work_struct and
* pm_iir. */
spinlock_t lock;
 
		/* The variables below and all the rps hw state are protected
		 * by dev->struct_mutex. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
} rps;
 
/* ilk-only ips/rps state. Everything in here is protected by the global
* mchdev_lock in intel_pm.c */
struct {
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 fmax;
u8 fstart;
 
727,15 → 858,16
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
u8 corr;
 
int c_m;
int r_t;
u8 corr;
spinlock_t *mchdev_lock;
} ips;
 
enum no_fbc_reason no_fbc_reason;
 
// struct drm_mm_node *compressed_fb;
// struct drm_mm_node *compressed_llb;
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
 
unsigned long last_gpu_reset;
 
744,19 → 876,54
 
// struct backlight_device *backlight;
 
// struct drm_property *broadcast_rgb_property;
// struct drm_property *force_audio_property;
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
 
bool hw_contexts_disabled;
uint32_t hw_context_size;
} drm_i915_private_t;
 
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
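
/* Usage sketch (illustrative): visit every ring that
 * intel_ring_initialized() reports as set up, e.g.
 *
 *   struct intel_ring_buffer *ring;
 *   int i;
 *
 *   for_each_ring(ring, dev_priv, i)
 *           i915_gem_retire_requests_ring(ring);
 */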
 
enum hdmi_force_audio {
HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
HDMI_AUDIO_OFF, /* force turn off HDMI audio */
HDMI_AUDIO_AUTO, /* trust EDID */
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
 
enum i915_cache_level {
I915_CACHE_NONE,
I915_CACHE_NONE = 0,
I915_CACHE_LLC,
I915_CACHE_LLC_MLC, /* gen6+ */
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};
 
struct drm_i915_gem_object_ops {
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
	 * of pages before binding them into the GTT, and put_pages() is
* called after we no longer need them. As we expect there to be
* associated cost with migrating pages between the backing storage
* and making them available for the GPU (e.g. clflush), we may hold
* onto the pages after they are no longer referenced by the GPU
* in case they may be used again shortly (for example migrating the
* pages to a different memory domain within the GTT). put_pages()
* will therefore most likely be called when the object itself is
* being released or under memory pressure (where we attempt to
* reap pages for the shrinker).
*/
int (*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *);
};
 
struct drm_i915_gem_object {
struct drm_gem_object base;
 
const struct drm_i915_gem_object_ops *ops;
 
void *mapped;
 
/** Current space allocated to this object in the GTT, if any. */
763,18 → 930,16
struct drm_mm_node *gtt_space;
struct list_head gtt_list;
 
/** This object's place on the active/flushing/inactive lists */
/** This object's place on the active/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
 
/**
* This is set if the object is on the active or flushing lists
* (has pending rendering), and is not set if it's on inactive (ready
* to be unbound).
* This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
*/
unsigned int active:1;
 
785,12 → 950,6
unsigned int dirty:1;
 
/**
* This is set if the object has been written to since the last
* GPU flush.
*/
unsigned int pending_gpu_write:1;
 
/**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
806,7 → 965,14
* Current tiling mode for the object.
*/
unsigned int tiling_mode:2;
unsigned int tiling_changed:1;
/**
* Whether the tiling parameters for the currently associated fence
* register have changed. Note that for the purposes of tracking
* tiling changes we also treat the unfenced register, the register
* slot that the object occupies whilst it executes a fenced
* command (such as BLT on gen2/3), as a "fence".
*/
unsigned int fence_dirty:1;
 
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
842,14 → 1008,17
 
unsigned int cache_level:2;
 
struct page **pages;
unsigned int has_aliasing_ppgtt_mapping:1;
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
 
/**
* DMAR support
*/
struct scatterlist *sg_list;
int num_sg;
struct pagelist pages;
int pages_pin_count;
 
/* prime dma-buf support */
void *dma_buf_vmapping;
int vmapping_count;
 
/**
* Used for performing relocations during execbuffer insertion.
*/
864,13 → 1033,13
*/
uint32_t gtt_offset;
 
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_rendering_seqno;
struct intel_ring_buffer *ring;
 
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_read_seqno;
uint32_t last_write_seqno;
/** Breadcrumb of last fenced GPU access to the buffer. */
uint32_t last_fenced_seqno;
struct intel_ring_buffer *last_fenced_ring;
 
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
878,13 → 1047,6
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
 
 
/**
* If present, while GEM_DOMAIN_CPU is in the read domain this array
* flags which individual pages are valid.
*/
uint8_t *page_cpu_valid;
 
/** User space pin count and filp owning the pin */
uint32_t user_pin_count;
struct drm_file *pin_filp;
919,6 → 1081,9
/** GEM sequence number associated with this request. */
uint32_t seqno;
 
	/** Position in the ringbuffer of the end of the request */
u32 tail;
 
/** Time at which this request was emitted, in jiffies. */
unsigned long emitted_jiffies;
 
935,6 → 1100,7
spinlock_t lock;
struct list_head request_list;
} mm;
struct idr context_idr;
};
 
#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
958,6 → 1124,8
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
 
/*
975,8 → 1143,12
 
#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev))
 
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
 
998,32 → 1170,62
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 
#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev))
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
 
//#include "i915_trace.h"
#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
 
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
extern int i915_panel_ignore_lid;
extern unsigned int i915_powersave;
extern unsigned int i915_semaphores;
extern unsigned int i915_lvds_downclock;
extern unsigned int i915_panel_use_ssc;
extern int i915_vbt_sdvo_panel_type;
extern unsigned int i915_enable_rc6;
extern unsigned int i915_enable_fbc;
extern bool i915_enable_hangcheck;
#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 
extern int i915_resume(struct drm_device *dev);
#define GT_FREQUENCY_MULTIPLIER 50
 
#include "i915_trace.h"
 
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle and RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6 and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one;
 * deeper states save more power, but require higher latency to switch to
 * and wake up.
 */
#define INTEL_RC6_ENABLE (1<<0)
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
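
/* Example (from the bitmask semantics above): i915_enable_rc6 = 3
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE) permits RC6 and deep RC6 but
 * not RC6pp; 7 permits all three states.
 */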
 
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
 
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
1034,12 → 1236,15
extern void i915_driver_postclose(struct drm_device *dev,
struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device * dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect *box,
int DR1, int DR4);
extern int i915_reset(struct drm_device *dev, u8 flags);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
1049,19 → 1254,11
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
extern void intel_irq_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
 
extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_error_state_free(struct kref *error_ref);
 
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1078,18 → 1275,6
#endif
 
 
/* i915_mem.c */
extern int i915_mem_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_mem_free(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_mem_init_heap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void i915_mem_takedown(struct mem_block **heap);
extern void i915_mem_release(struct drm_device * dev,
struct drm_file *file_priv, struct mem_block *heap);
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
1117,6 → 1302,10
struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1131,24 → 1320,45
struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable);
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
 
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
static inline dma_addr_t i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
return obj->pages.page[n];
};
 
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pages.page == NULL);
obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pages_pin_count == 0);
obj->pages_pin_count--;
}
 
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno);
1169,18 → 1379,36
return (int32_t)(seq1 - seq2) >= 0;
}
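
/* Example: the signed subtraction above keeps the seqno comparison
 * wrap-safe: with seq1 == 0x00000001 and seq2 == 0xfffffffe,
 * (int32_t)(seq1 - seq2) == 3 >= 0, so seq1 is treated as newer even
 * though it is numerically smaller.
 */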
 
static inline u32
i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
 
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
return ring->outstanding_lazy_request = dev_priv->next_seqno;
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
dev_priv->fence_regs[obj->fence_reg].pin_count++;
return true;
} else
return false;
}
 
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
dev_priv->fence_regs[obj->fence_reg].pin_count--;
}
}
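
/* Usage sketch (illustrative): bracket an access that relies on the
 * fence register so the fence cannot be stolen meanwhile, e.g.
 *
 *   if (i915_gem_object_pin_fence(obj)) {
 *           ... access through the fenced GTT mapping ...
 *           i915_gem_object_unpin_fence(obj);
 *   }
 */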
 
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
bool interruptible);
 
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
1187,18 → 1415,18
uint32_t read_domains,
uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_init_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
void i915_gem_do_init(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __must_check i915_add_request(struct intel_ring_buffer *ring,
int i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
u32 *seqno);
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
1205,6 → 1433,8
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined);
1225,21 → 1455,53
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
 
 
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
 
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
struct drm_file *file, int to_id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
 
/* i915_gem_gtt.c */
int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj);
 
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
 
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, bool mappable);
int __must_check i915_gem_evict_everything(struct drm_device *dev,
bool purgeable_only);
int __must_check i915_gem_evict_inactive(struct drm_device *dev,
bool purgeable_only);
unsigned alignment,
unsigned cache_level,
bool mappable,
bool nonblock);
int i915_gem_evict_everything(struct drm_device *dev);
 
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
 
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
1270,9 → 1532,20
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
 
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);
 
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern inline bool intel_gmbus_is_port_valid(unsigned port)
{
return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}
 
extern struct i2c_adapter *intel_gmbus_get_adapter(
struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
1307,23 → 1580,24
#endif /* CONFIG_ACPI */
 
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_init_pch_refclk(struct drm_device *dev);
extern void ironlake_enable_rc6(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
 
extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
 
/* overlay */
#ifdef CONFIG_DEBUG_FS
1336,28 → 1610,6
struct intel_display_error_state *error);
#endif
 
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
 
#define BEGIN_LP_RING(n) \
intel_ring_begin(LP_RING(dev_priv), (n))
 
#define OUT_RING(x) \
intel_ring_emit(LP_RING(dev_priv), x)
 
#define ADVANCE_LP_RING() \
intel_ring_advance(LP_RING(dev_priv))
 
/**
* Lock test for when it's just for synchronization of ring access.
*
* In that case, we don't need to do it when GEM is initialized as nobody else
* has access to the ring.
*/
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
if (LP_RING(dev->dev_private)->obj == NULL) \
LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
 
/* On SNB platform, before reading ring registers forcewake bit
* must be set to prevent GT core from power down and stale values being
* returned.
1364,14 → 1616,8
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
 
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
 
1426,6 → 1672,7
}
 
 
#define ioread32(addr) readl(addr)
 
 
 
/drivers/video/drm/i915/i915_gem.c
25,9 → 25,8
*
*/
 
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
107,29 → 106,39
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
 
static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
bool write);
static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
uint64_t offset,
uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_device *dev,
struct drm_i915_fence_reg *reg);
bool map_and_fenceable,
bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
 
//static int i915_gem_inactive_shrink(struct shrinker *shrinker,
// struct shrink_control *sc);
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
 
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
if (obj->tiling_mode)
i915_gem_release_mmap(obj);
 
/* As we do not have an associated fence register, we will force
* a tiling change if we ever need to acquire one.
*/
obj->fence_dirty = false;
obj->fence_reg = I915_FENCE_REG_NONE;
}
 
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
158,9 → 167,18
if (!atomic_read(&dev_priv->mm.wedged))
return 0;
 
ret = wait_for_completion_interruptible(x);
if (ret)
/*
* Only wait 10 seconds for the gpu reset to complete to avoid hanging
* userspace. If it takes that long, something really bad is going on and

* we should simply try to bail out and fail as gracefully as possible.
*/
ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
} else if (ret < 0) {
return ret;
}
 
if (atomic_read(&dev_priv->mm.wedged)) {
/* GPU is hung, bump the completion count to account for
195,28 → 213,10
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
return obj->gtt_space && !obj->active && obj->pin_count == 0;
return obj->gtt_space && !obj->active;
}
 
void i915_gem_do_init(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end)
{
drm_i915_private_t *dev_priv = dev->dev_private;
 
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start);
 
dev_priv->mm.gtt_start = start;
dev_priv->mm.gtt_mappable_end = mappable_end;
dev_priv->mm.gtt_end = end;
dev_priv->mm.gtt_total = end - start;
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 
/* Take over this portion of the GTT */
intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}
 
#if 0
 
int
225,12 → 225,20
{
struct drm_i915_gem_init *args = data;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
if (args->gtt_start >= args->gtt_end ||
(args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
return -EINVAL;
 
/* GEM with user mode setting was never supported on ilk and later. */
if (INTEL_INFO(dev)->gen >= 5)
return -ENODEV;
 
mutex_lock(&dev->struct_mutex);
i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
i915_gem_init_global_gtt(dev, args->gtt_start,
args->gtt_end, args->gtt_end);
mutex_unlock(&dev->struct_mutex);
 
return 0;
246,10 → 254,10
struct drm_i915_gem_object *obj;
size_t pinned;
 
 
pinned = 0;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
if (obj->pin_count)
pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
 
260,8 → 268,8
}
 
#if 0
 
int i915_gem_create(struct drm_file *file,
static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
uint64_t size,
uint32_t *handle_p)
322,6 → 330,7
struct drm_file *file)
{
struct drm_i915_gem_create *args = data;
 
return i915_gem_create(file, dev,
args->size, &args->handle);
}
334,124 → 343,232
obj->tiling_mode != I915_TILING_NONE;
}
 
static inline void
slow_shmem_copy(struct page *dst_page,
int dst_offset,
struct page *src_page,
int src_offset,
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
const char *gpu_vaddr, int gpu_offset,
int length)
{
char *dst_vaddr, *src_vaddr;
int ret, cpu_offset = 0;
 
dst_vaddr = kmap(dst_page);
src_vaddr = kmap(src_page);
while (length > 0) {
int cacheline_end = ALIGN(gpu_offset + 1, 64);
int this_length = min(cacheline_end - gpu_offset, length);
int swizzled_gpu_offset = gpu_offset ^ 64;
 
memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
ret = __copy_to_user(cpu_vaddr + cpu_offset,
gpu_vaddr + swizzled_gpu_offset,
this_length);
if (ret)
return ret + length;
 
kunmap(src_page);
kunmap(dst_page);
cpu_offset += this_length;
gpu_offset += this_length;
length -= this_length;
}
 
static inline void
slow_shmem_bit17_copy(struct page *gpu_page,
int gpu_offset,
struct page *cpu_page,
int cpu_offset,
int length,
int is_read)
{
char *gpu_vaddr, *cpu_vaddr;
 
/* Use the unswizzled path if this page isn't affected. */
if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
if (is_read)
return slow_shmem_copy(cpu_page, cpu_offset,
gpu_page, gpu_offset, length);
else
return slow_shmem_copy(gpu_page, gpu_offset,
cpu_page, cpu_offset, length);
return 0;
}
 
gpu_vaddr = kmap(gpu_page);
cpu_vaddr = kmap(cpu_page);
static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
const char __user *cpu_vaddr,
int length)
{
int ret, cpu_offset = 0;
 
/* Copy the data, XORing A6 with A17 (1). The user already knows he's
* XORing with the other bits (A9 for Y, A9 and A10 for X)
*/
while (length > 0) {
int cacheline_end = ALIGN(gpu_offset + 1, 64);
int this_length = min(cacheline_end - gpu_offset, length);
int swizzled_gpu_offset = gpu_offset ^ 64;
 
if (is_read) {
memcpy(cpu_vaddr + cpu_offset,
gpu_vaddr + swizzled_gpu_offset,
this_length);
} else {
memcpy(gpu_vaddr + swizzled_gpu_offset,
ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
cpu_vaddr + cpu_offset,
this_length);
}
if (ret)
return ret + length;
 
cpu_offset += this_length;
gpu_offset += this_length;
length -= this_length;
}
 
kunmap(cpu_page);
kunmap(gpu_page);
return 0;
}
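A minimal standalone sketch of the bit-17 swizzle the two helpers above undo, assuming the same pairing of adjacent 64-byte cachelines; this is an illustration, not driver code:
#include <stdio.h>

/* On pages where physical address bit 17 is set, the hardware swaps each
 * even 64-byte cacheline with its odd neighbour. Walking the copy in
 * cacheline-sized chunks and XORing the offset with 64 undoes the swap. */
int main(void)
{
	for (int gpu_offset = 0; gpu_offset < 256; gpu_offset += 64)
		printf("logical %3d -> swizzled %3d\n",
		       gpu_offset, gpu_offset ^ 64);
	return 0;
}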
 
/**
* This is the fast shmem pread path, which attempts to copy_from_user directly
* from the backing pages of the object to the user's address space. On a
* fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
*/
/* Per-page copy function for the shmem pread fastpath.
* Flushes invalid cachelines before reading the target if
* needs_clflush is set. */
static int
i915_gem_shmem_pread_fast(struct drm_device *dev,
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
char __user *user_data,
bool page_do_bit17_swizzling, bool needs_clflush)
{
char *vaddr;
int ret;
 
if (unlikely(page_do_bit17_swizzling))
return -EINVAL;
 
vaddr = kmap_atomic(page);
if (needs_clflush)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
ret = __copy_to_user_inatomic(user_data,
vaddr + shmem_page_offset,
page_length);
kunmap_atomic(vaddr);
 
return ret ? -EFAULT : 0;
}
 
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
bool swizzled)
{
if (unlikely(swizzled)) {
unsigned long start = (unsigned long) addr;
unsigned long end = (unsigned long) addr + length;
 
/* For swizzling simply ensure that we always flush both
* channels. Lame, but simple and it works. Swizzled
* pwrite/pread is far from a hotpath - current userspace
* doesn't use it at all. */
start = round_down(start, 128);
end = round_up(end, 128);
 
drm_clflush_virt_range((void *)start, end - start);
} else {
drm_clflush_virt_range(addr, length);
}
 
}
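A worked example of the rounding above (numbers hypothetical):
/* Flushing 8 bytes at offset 100 of a swizzled range rounds the start
 * down to 0 and the end up to 128, so both halves of the 128-byte
 * swizzle pair are flushed even though only one was actually written. */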
 
/* Only difference to the fast-path function is that this can handle bit17
* and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
char __user *user_data,
bool page_do_bit17_swizzling, bool needs_clflush)
{
char *vaddr;
int ret;
 
vaddr = kmap(page);
if (needs_clflush)
shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
page_length,
page_do_bit17_swizzling);
 
if (page_do_bit17_swizzling)
ret = __copy_to_user_swizzled(user_data,
vaddr, shmem_page_offset,
page_length);
else
ret = __copy_to_user(user_data,
vaddr + shmem_page_offset,
page_length);
kunmap(page);
 
return ret ? - EFAULT : 0;
}
 
static int
i915_gem_shmem_pread(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args,
struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
char __user *user_data;
ssize_t remain;
loff_t offset;
char __user *user_data;
int page_offset, page_length;
int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
int hit_slowpath = 0;
int prefaulted = 0;
int needs_clflush = 0;
struct scatterlist *sg;
int i;
 
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
 
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
/* If we're not in the cpu read domain, set ourselves into the gtt
* read domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush = 1;
if (obj->gtt_space) {
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
return ret;
}
}
 
ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
 
i915_gem_object_pin_pages(obj);
 
offset = args->offset;
 
while (remain > 0) {
for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
struct page *page;
char *vaddr;
int ret;
 
if (i < offset >> PAGE_SHIFT)
continue;
 
if (remain <= 0)
break;
 
/* Operation in this page
*
* page_offset = offset within page
* shmem_page_offset = offset within page in shmem file
* page_length = bytes to copy for this page
*/
page_offset = offset_in_page(offset);
shmem_page_offset = offset_in_page(offset);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
 
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
 
vaddr = kmap_atomic(page);
ret = __copy_to_user_inatomic(user_data,
vaddr + page_offset,
page_length);
kunmap_atomic(vaddr);
ret = shmem_pread_fast(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
needs_clflush);
if (ret == 0)
goto next_page;
 
hit_slowpath = 1;
mutex_unlock(&dev->struct_mutex);
 
if (!prefaulted) {
ret = fault_in_multipages_writeable(user_data, remain);
/* Userspace is tricking us, but we've already clobbered
* its pages with the prefault and promised to write the
* data up to the first fault. Hence ignore any errors
* and just continue. */
(void)ret;
prefaulted = 1;
}
 
ret = shmem_pread_slow(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
needs_clflush);
 
mutex_lock(&dev->struct_mutex);
 
next_page:
mark_page_accessed(page);
page_cache_release(page);
 
if (ret)
return -EFAULT;
goto out;
 
remain -= page_length;
user_data += page_length;
458,165 → 575,641
offset += page_length;
}
 
out:
i915_gem_object_unpin_pages(obj);
 
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
i915_gem_object_truncate(obj);
}
 
return ret;
}
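The fast/slow split above is a recurring pattern in this file; a condensed sketch of the locking dance it performs (helper names hypothetical):
/* Pattern sketch, not driver code:
 *
 *   ret = copy_fast(page, ...);        // atomic kmap; a fault -> -EFAULT
 *   if (ret == 0)
 *       goto next_page;
 *   mutex_unlock(&dev->struct_mutex);  // faulting is only legal unlocked
 *   fault_in_user_pages(...);          // prefault once, ignore errors
 *   ret = copy_slow(page, ...);        // sleeping kmap; may fault safely
 *   mutex_lock(&dev->struct_mutex);    // re-take before touching obj state
 */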
 
/**
* Reads data from the object referenced by handle.
*
* On error, the contents of *data are undefined.
*/
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_pread *args = data;
struct drm_i915_gem_object *obj;
int ret = 0;
 
if (args->size == 0)
return 0;
 
if (!access_ok(VERIFY_WRITE,
(char __user *)(uintptr_t)args->data_ptr,
args->size))
return -EFAULT;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
 
/* Bounds check source. */
if (args->offset > obj->base.size ||
args->size > obj->base.size - args->offset) {
ret = -EINVAL;
goto out;
}
 
/* prime objects have no backing filp to GEM pread/pwrite
* pages from.
*/
if (!obj->base.filp) {
ret = -EINVAL;
goto out;
}
 
trace_i915_gem_object_pread(obj, args->offset, args->size);
 
ret = i915_gem_shmem_pread(dev, obj, args, file);
 
out:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
 
/* This is the fast write path which cannot handle
* page faults in the source data
*/
 
static inline int
fast_user_write(struct io_mapping *mapping,
loff_t page_base, int page_offset,
char __user *user_data,
int length)
{
void __iomem *vaddr_atomic;
void *vaddr;
unsigned long unwritten;
 
vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
/* We can use the cpu mem copy function because this is X86. */
vaddr = (void __force*)vaddr_atomic + page_offset;
unwritten = __copy_from_user_inatomic_nocache(vaddr,
user_data, length);
io_mapping_unmap_atomic(vaddr_atomic);
return unwritten;
}
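A note on the copy primitive used above, for clarity:
/* __copy_from_user_inatomic_nocache() returns the number of bytes it
 * could NOT copy, so any nonzero result means the source page faulted
 * while we were atomic; the caller treats that as -EFAULT and falls back
 * to the shmem path. The _nocache variant is chosen because the
 * destination is write-combined aperture space, so pulling it through
 * the CPU cache would be pure waste. */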
 
/**
* This is the fallback shmem pread path, which allocates temporary storage
* in kernel space to copy_to_user into outside of the struct_mutex, so we
* can copy out of the object's backing pages while holding the struct mutex
* and not take page faults.
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
*/
static int
i915_gem_shmem_pread_slow(struct drm_device *dev,
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct mm_struct *mm = current->mm;
struct page **user_pages;
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t offset, pinned_pages, i;
loff_t first_data_page, last_data_page, num_pages;
int shmem_page_offset;
int data_page_index, data_page_offset;
int page_length;
int ret;
uint64_t data_ptr = args->data_ptr;
int do_bit17_swizzling;
loff_t offset, page_base;
char __user *user_data;
int page_offset, page_length, ret;
 
ret = i915_gem_object_pin(obj, 0, true, true);
if (ret)
goto out;
 
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto out_unpin;
 
ret = i915_gem_object_put_fence(obj);
if (ret)
goto out_unpin;
 
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
 
/* Pin the user pages containing the data. We can't fault while
* holding the struct mutex, yet we want to hold it while
* dereferencing the user data.
offset = obj->gtt_offset + args->offset;
 
while (remain > 0) {
/* Operation in this page
*
* page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
first_data_page = data_ptr / PAGE_SIZE;
last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
num_pages = last_data_page - first_data_page + 1;
page_base = offset & PAGE_MASK;
page_offset = offset_in_page(offset);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
 
user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
if (user_pages == NULL)
return -ENOMEM;
 
mutex_unlock(&dev->struct_mutex);
down_read(&mm->mmap_sem);
pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
num_pages, 1, 0, user_pages, NULL);
up_read(&mm->mmap_sem);
mutex_lock(&dev->struct_mutex);
if (pinned_pages < num_pages) {
/* If we get a fault while copying data, then (presumably) our
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out;
goto out_unpin;
}
 
ret = i915_gem_object_set_cpu_read_domain_range(obj,
args->offset,
args->size);
remain -= page_length;
user_data += page_length;
offset += page_length;
}
 
out_unpin:
i915_gem_object_unpin(obj);
out:
return ret;
}
 
/* Per-page copy function for the shmem pwrite fastpath.
* Flushes invalid cachelines before writing to the target if
* needs_clflush_before is set and flushes out any written cachelines after
* writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
char __user *user_data,
bool page_do_bit17_swizzling,
bool needs_clflush_before,
bool needs_clflush_after)
{
char *vaddr;
int ret;
 
if (unlikely(page_do_bit17_swizzling))
return -EINVAL;
 
vaddr = kmap_atomic(page);
if (needs_clflush_before)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
ret = __copy_from_user_inatomic_nocache(vaddr + shmem_page_offset,
user_data,
page_length);
if (needs_clflush_after)
drm_clflush_virt_range(vaddr + shmem_page_offset,
page_length);
kunmap_atomic(vaddr);
 
return ret ? -EFAULT : 0;
}
 
/* Only difference to the fast-path function is that this can handle bit17
* and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
char __user *user_data,
bool page_do_bit17_swizzling,
bool needs_clflush_before,
bool needs_clflush_after)
{
char *vaddr;
int ret;
 
vaddr = kmap(page);
if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
page_length,
page_do_bit17_swizzling);
if (page_do_bit17_swizzling)
ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
user_data,
page_length);
else
ret = __copy_from_user(vaddr + shmem_page_offset,
user_data,
page_length);
if (needs_clflush_after)
shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
page_length,
page_do_bit17_swizzling);
kunmap(page);
 
return ret ? -EFAULT : 0;
}
 
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
ssize_t remain;
loff_t offset;
char __user *user_data;
int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
int hit_slowpath = 0;
int needs_clflush_after = 0;
int needs_clflush_before = 0;
int i;
struct scatterlist *sg;
 
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
 
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
/* If we're not in the cpu write domain, set ourselves into the gtt
* write domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will use the data
* right away and we therefore have to clflush anyway. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush_after = 1;
if (obj->gtt_space) {
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto out;
return ret;
}
}
/* Same trick applies to invalidating partially written cachelines before
* writing. */
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
&& obj->cache_level == I915_CACHE_NONE)
needs_clflush_before = 1;
 
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
 
i915_gem_object_pin_pages(obj);
 
offset = args->offset;
obj->dirty = 1;
 
while (remain > 0) {
for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
struct page *page;
int partial_cacheline_write;
 
if (i < offset >> PAGE_SHIFT)
continue;
 
if (remain <= 0)
break;
 
/* Operation in this page
*
* shmem_page_offset = offset within page in shmem file
* data_page_index = page number in get_user_pages return
* data_page_offset = offset with data_page_index page.
* page_length = bytes to copy for this page
*/
shmem_page_offset = offset_in_page(offset);
data_page_index = data_ptr / PAGE_SIZE - first_data_page;
data_page_offset = offset_in_page(data_ptr);
 
page_length = remain;
if ((shmem_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - shmem_page_offset;
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
 
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
}
/* If we don't overwrite a cacheline completely we need to be
* careful to have up-to-date data by first clflushing. Don't
* overcomplicate things and flush the entire page. */
partial_cacheline_write = needs_clflush_before &&
((shmem_page_offset | page_length)
& (boot_cpu_data.x86_clflush_size - 1));
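		/* Added note on the OR trick above: (offset | length) has a
		 * low bit set iff either value is not cacheline aligned.
		 * E.g. with 64-byte cachelines, offset 64 / length 64 gives
		 * 64 & 63 == 0 (fully aligned, no pre-flush needed), while
		 * offset 64 / length 60 gives 124 & 63 == 60 != 0. */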
 
if (do_bit17_swizzling) {
slow_shmem_bit17_copy(page,
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
page_length,
1);
} else {
slow_shmem_copy(user_pages[data_page_index],
data_page_offset,
page,
shmem_page_offset,
page_length);
}
page = sg_page(sg);
page_do_bit17_swizzling = obj_do_bit17_swizzling &&
(page_to_phys(page) & (1 << 17)) != 0;
 
ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
partial_cacheline_write,
needs_clflush_after);
if (ret == 0)
goto next_page;
 
hit_slowpath = 1;
mutex_unlock(&dev->struct_mutex);
ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
user_data, page_do_bit17_swizzling,
partial_cacheline_write,
needs_clflush_after);
 
mutex_lock(&dev->struct_mutex);
 
next_page:
set_page_dirty(page);
mark_page_accessed(page);
page_cache_release(page);
 
if (ret)
goto out;
 
remain -= page_length;
data_ptr += page_length;
user_data += page_length;
offset += page_length;
}
 
out:
for (i = 0; i < pinned_pages; i++) {
SetPageDirty(user_pages[i]);
mark_page_accessed(user_pages[i]);
page_cache_release(user_pages[i]);
i915_gem_object_unpin_pages(obj);
 
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
i915_gem_object_truncate(obj);
/* and flush dirty cachelines in case the object isn't in the cpu write
* domain anymore. */
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
intel_gtt_chipset_flush();
}
drm_free_large(user_pages);
}
 
if (needs_clflush_after)
intel_gtt_chipset_flush();
 
return ret;
}
 
/**
* Writes data to the object referenced by handle.
*
* On error, the contents of the buffer that were to be modified are undefined.
*/
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
 
if (args->size == 0)
return 0;
 
if (!access_ok(VERIFY_READ,
(char __user *)(uintptr_t)args->data_ptr,
args->size))
return -EFAULT;
 
ret = fault_in_multipages_readable((char __user *)(uintptr_t)args->data_ptr,
args->size);
if (ret)
return -EFAULT;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
 
/* Bounds check destination. */
if (args->offset > obj->base.size ||
args->size > obj->base.size - args->offset) {
ret = -EINVAL;
goto out;
}
 
/* prime objects have no backing filp to GEM pread/pwrite
* pages from.
*/
if (!obj->base.filp) {
ret = -EINVAL;
goto out;
}
 
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
if (obj->phys_obj) {
ret = i915_gem_phys_pwrite(dev, obj, args, file);
goto out;
}
 
if (obj->cache_level == I915_CACHE_NONE &&
obj->tiling_mode == I915_TILING_NONE &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
* textures). Fallback to the shmem path in that case. */
}
 
if (ret == -EFAULT || ret == -ENOSPC)
ret = i915_gem_shmem_pwrite(dev, obj, args, file);
 
out:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
 
#endif
 
int
i915_gem_check_wedge(struct drm_i915_private *dev_priv,
bool interruptible)
{
if (atomic_read(&dev_priv->mm.wedged)) {
struct completion *x = &dev_priv->error_completion;
bool recovery_complete;
unsigned long flags;
 
/* Give the error handler a chance to run. */
spin_lock_irqsave(&x->wait.lock, flags);
recovery_complete = x->done > 0;
spin_unlock_irqrestore(&x->wait.lock, flags);
 
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
if (!interruptible)
return -EIO;
 
/* Recovery complete, but still wedged means reset failure. */
if (recovery_complete)
return -EIO;
 
return -EAGAIN;
}
 
return 0;
}
 
/*
* Compare seqno against outstanding lazy request. Emit a request if they are
* equal.
*/
static int
i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
{
int ret;
 
BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
ret = 0;
if (seqno == ring->outstanding_lazy_request)
ret = i915_add_request(ring, NULL, NULL);
 
return ret;
}
 
/**
* __wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
* @seqno: duh!
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
* Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with the remaining time filled in the timeout argument.
*/
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
bool interruptible, struct timespec *timeout)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct timespec before, now, wait_time={1,0};
unsigned long timeout_jiffies;
long end;
bool wait_forever = true;
int ret;
 
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
 
trace_i915_gem_request_wait_begin(ring, seqno);
 
if (timeout != NULL) {
wait_time = *timeout;
wait_forever = false;
}
 
// timeout_jiffies = timespec_to_jiffies(&wait_time);
 
if (WARN_ON(!ring->irq_get(ring)))
return -ENODEV;
#if 0
 
/* Record current time in case interrupted by signal, or wedged */
getrawmonotonic(&before);
 
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
atomic_read(&dev_priv->mm.wedged))
do {
end = wait_event_timeout(ring->irq_queue, EXIT_COND,
timeout_jiffies);
 
ret = i915_gem_check_wedge(dev_priv, interruptible);
if (ret)
end = ret;
} while (end == 0 && wait_forever);
 
getrawmonotonic(&now);
 
ring->irq_put(ring);
trace_i915_gem_request_wait_end(ring, seqno);
#undef EXIT_COND
 
if (timeout) {
// struct timespec sleep_time = timespec_sub(now, before);
// *timeout = timespec_sub(*timeout, sleep_time);
}
 
switch (end) {
case -EIO:
case -EAGAIN: /* Wedged */
case -ERESTARTSYS: /* Signal */
return (int)end;
case 0: /* Timeout */
// if (timeout)
// set_normalized_timespec(timeout, 0, 0);
return -ETIME;
default: /* Completed */
WARN_ON(end < 0); /* We're not aware of other errors */
return 0;
}
#endif
 
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
atomic_read(&dev_priv->mm.wedged))
wait_event(ring->irq_queue, EXIT_COND);
#undef EXIT_COND
ring->irq_put(ring);
 
return 0;
}
 
/**
* Waits for a sequence number to be signaled, and cleans up the
* request and object lists appropriately for that event.
*/
int
i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible = dev_priv->mm.interruptible;
int ret;
 
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(seqno == 0);
 
ret = i915_gem_check_wedge(dev_priv, interruptible);
if (ret)
return ret;
 
ret = i915_gem_check_olr(ring, seqno);
if (ret)
return ret;
 
return __wait_seqno(ring, seqno, interruptible, NULL);
}
 
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool readonly)
{
struct intel_ring_buffer *ring = obj->ring;
u32 seqno;
int ret;
 
seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
if (seqno == 0)
return 0;
 
ret = i915_wait_seqno(ring, seqno);
if (ret)
return ret;
 
i915_gem_retire_requests_ring(ring);
 
/* Manually manage the write flush as we may have not yet
* retired the buffer.
*/
if (obj->last_write_seqno &&
i915_seqno_passed(seqno, obj->last_write_seqno)) {
obj->last_write_seqno = 0;
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
}
 
return 0;
}
 
 
 
654,6 → 1247,40
 
 
 
 
 
 
 
 
 
/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
*
* Preserve the reservation of the mmapping with the DRM core code, but
* relinquish ownership of the pages back to the system.
*
* It is vital that we remove the page mapping if we have mapped a tiled
* object through the GTT and then lose the fence register due to
* resource pressure. Similarly if the object has been moved out of the
* aperture, then pages mapped into userspace must be revoked. Removing the
* mapping will then trigger a page fault on the next user access, allowing
* fixup by i915_gem_fault().
*/
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
if (!obj->fault_mappable)
return;
 
//	if (obj->base.dev->dev_mapping)
//		unmap_mapping_range(obj->base.dev->dev_mapping,
//		    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
//		    obj->base.size, 1);
 
obj->fault_mappable = false;
}
 
static uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
731,84 → 1358,160
return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
 
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
// struct inode *inode;
 
// i915_gem_object_free_mmap_offset(obj);
 
// if (obj->base.filp == NULL)
// return;
 
/* Our goal here is to return as much of the memory as possible
* back to the system, as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
// inode = obj->base.filp->f_path.dentry->d_inode;
// shmem_truncate_range(inode, 0, (loff_t)-1);
 
obj->madv = __I915_MADV_PURGED;
}
 
static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
return obj->madv == I915_MADV_DONTNEED;
}
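For orientation, the madv lifecycle these two helpers implement, summarized as a sketch (states are the driver's own, descriptions paraphrased):
/* madv lifecycle (sketch):
 *
 *   I915_MADV_WILLNEED  -- default; backing pages must be preserved
 *   I915_MADV_DONTNEED  -- userspace marked the object purgeable;
 *                          is_purgeable() returns true and the backing
 *                          store may be truncated under memory pressure
 *   __I915_MADV_PURGED  -- truncate() ran, the backing store is gone for
 *                          good; any later page access is a bug (see the
 *                          BUG_ON checks in the page gathering paths)
 */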
 
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
int ret, i;
 
BUG_ON(obj->madv == __I915_MADV_PURGED);
 
ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
WARN_ON(ret != -EIO);
i915_gem_clflush_object(obj);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
 
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
 
for (i = 0; i < obj->pages.nents; i++)
FreePage(obj->pages.page[i]);
 
obj->dirty = 0;
kfree(obj->pages.page);
}
 
static int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
const struct drm_i915_gem_object_ops *ops = obj->ops;
 
if (obj->pages.page == NULL)
return 0;
 
BUG_ON(obj->gtt_space);
 
if (obj->pages_pin_count)
return -EBUSY;
 
ops->put_pages(obj);
obj->pages.page = NULL;
 
list_del(&obj->gtt_list);
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
 
return 0;
}
 
 
 
 
 
 
 
 
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask)
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
dma_addr_t page;
int page_count, i;
struct page *page;
 
/* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
*/
page_count = obj->base.size / PAGE_SIZE;
BUG_ON(obj->pages != NULL);
obj->pages = malloc(page_count * sizeof(struct page *));
if (obj->pages == NULL)
BUG_ON(obj->pages.page != NULL);
obj->pages.page = malloc(page_count * sizeof(dma_addr_t));
if (obj->pages.page == NULL)
return -ENOMEM;
 
 
for (i = 0; i < page_count; i++) {
page = (struct page*)AllocPage(); // oh-oh
if (IS_ERR(page))
page = AllocPage(); // oh-oh
if ( page == 0 )
goto err_pages;
 
obj->pages[i] = page;
}
obj->pages.page[i] = page;
};
 
obj->pages.nents = page_count;
 
 
// if (obj->tiling_mode != I915_TILING_NONE)
// i915_gem_object_do_bit_17_swizzle(obj);
 
 
 
return 0;
 
err_pages:
while (i--)
FreePage((addr_t)obj->pages[i]);
FreePage(obj->pages.page[i]);
 
free(obj->pages);
obj->pages = NULL;
return PTR_ERR(page);
free(obj->pages.page);
obj->pages.page = NULL;
obj->pages.nents = 0;
 
return -ENOMEM;
}
 
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
/* Ensure that the associated pages are gathered from the backing storage
* and pinned into our object. i915_gem_object_get_pages() may be called
* multiple times before they are released by a single call to
* i915_gem_object_put_pages() - once the pages are no longer referenced
* either as a result of memory pressure (reaping pages under the shrinker)
* or as the object is itself released.
*/
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int page_count = obj->base.size / PAGE_SIZE;
int i;
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
const struct drm_i915_gem_object_ops *ops = obj->ops;
int ret;
 
BUG_ON(obj->madv == __I915_MADV_PURGED);
if (obj->pages.page)
return 0;
 
// if (obj->tiling_mode != I915_TILING_NONE)
// i915_gem_object_save_bit_17_swizzle(obj);
BUG_ON(obj->pages_pin_count);
 
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
ret = ops->get_pages(obj);
if (ret)
return ret;
 
for (i = 0; i < page_count; i++) {
FreePage((addr_t)obj->pages[i]);
list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
return 0;
}
obj->dirty = 0;
 
free(obj->pages);
obj->pages = NULL;
}
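A hedged usage sketch of the get/pin protocol described above (the caller is hypothetical; struct_mutex is assumed held):
/* Usage sketch:
 *   ret = i915_gem_object_get_pages(obj);   // gather backing pages
 *   if (ret)
 *       return ret;
 *   i915_gem_object_pin_pages(obj);         // pages_pin_count++
 *   ... access obj->pages ...
 *   i915_gem_object_unpin_pages(obj);       // pages_pin_count--
 *   // i915_gem_object_put_pages() returns -EBUSY while still pinned
 */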
 
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
830,135 → 1533,128
list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
 
obj->last_rendering_seqno = seqno;
obj->last_read_seqno = seqno;
 
if (obj->fenced_gpu_access) {
obj->last_fenced_seqno = seqno;
 
/* Bump MRU to take account of the delayed flush */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_fence_reg *reg;
 
BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
 
obj->last_fenced_seqno = seqno;
obj->last_fenced_ring = ring;
 
reg = &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
}
}
 
static void
i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
{
list_del_init(&obj->ring_list);
obj->last_rendering_seqno = 0;
}
 
static void
i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
 
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
 
i915_gem_object_move_off_active(obj);
}
if (obj->pin_count) /* are we a framebuffer? */
intel_mark_fb_idle(obj);
 
static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (obj->pin_count != 0)
list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
else
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
BUG_ON(!list_empty(&obj->gpu_write_list));
BUG_ON(!obj->active);
list_del_init(&obj->ring_list);
obj->ring = NULL;
 
i915_gem_object_move_off_active(obj);
obj->last_read_seqno = 0;
obj->last_write_seqno = 0;
obj->base.write_domain = 0;
 
obj->last_fenced_seqno = 0;
obj->fenced_gpu_access = false;
 
obj->active = 0;
obj->pending_gpu_write = false;
drm_gem_object_unreference(&obj->base);
 
WARN_ON(i915_verify_lists(dev));
}
 
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
static u32
i915_gem_get_seqno(struct drm_device *dev)
{
struct inode *inode;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 seqno = dev_priv->next_seqno;
 
/* Our goal here is to return as much of the memory as
* is possible back to the system as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
* backing pages, *now*.
*/
/* reserve 0 for non-seqno */
if (++dev_priv->next_seqno == 0)
dev_priv->next_seqno = 1;
 
obj->madv = __I915_MADV_PURGED;
return seqno;
}
 
static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
u32
i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
{
return obj->madv == I915_MADV_DONTNEED;
}
if (ring->outstanding_lazy_request == 0)
ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
 
static void
i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
uint32_t flush_domains)
{
struct drm_i915_gem_object *obj, *next;
 
list_for_each_entry_safe(obj, next,
&ring->gpu_write_list,
gpu_write_list) {
if (obj->base.write_domain & flush_domains) {
uint32_t old_write_domain = obj->base.write_domain;
 
obj->base.write_domain = 0;
list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_active(obj, ring,
i915_gem_next_request_seqno(ring));
 
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain);
return ring->outstanding_lazy_request;
}
}
}
 
int
i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request)
u32 *out_seqno)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
uint32_t seqno;
struct drm_i915_gem_request *request;
u32 request_ring_position;
u32 seqno;
int was_empty;
int ret;
 
BUG_ON(request == NULL);
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
* things up similar to emitting the lazy request. The difference here
* is that the flush _must_ happen before the next request, no matter
* what.
*/
ret = intel_ring_flush_all_caches(ring);
if (ret)
return ret;
 
request = kmalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
 
seqno = i915_gem_next_request_seqno(ring);
 
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
* position of the head.
*/
request_ring_position = intel_ring_get_tail(ring);
 
ret = ring->add_request(ring, &seqno);
if (ret)
if (ret) {
kfree(request);
return ret;
}
 
trace_i915_gem_request_add(ring, seqno);
 
request->seqno = seqno;
request->ring = ring;
request->emitted_jiffies = jiffies;
request->tail = request_ring_position;
request->emitted_jiffies = GetTimerTicks();
was_empty = list_empty(&ring->request_list);
list_add_tail(&request->list, &ring->request_list);
request->file_priv = NULL;
 
 
ring->outstanding_lazy_request = false;
ring->outstanding_lazy_request = 0;
 
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
966,10 → 1662,15
// jiffies +
// msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
if (was_empty)
if (was_empty) {
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
intel_mark_busy(dev_priv->dev);
}
}
 
if (out_seqno)
*out_seqno = seqno;
return 0;
}
 
976,28 → 1677,81
 
 
 
static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
 
request = list_first_entry(&ring->request_list,
struct drm_i915_gem_request,
list);
 
list_del(&request->list);
// i915_gem_request_remove_from_client(request);
kfree(request);
}
 
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
 
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
 
i915_gem_object_move_to_inactive(obj);
}
}
 
static void i915_gem_reset_fences(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 
i915_gem_write_fence(dev, i, NULL);
 
if (reg->obj)
i915_gem_object_fence_lost(reg->obj);
 
reg->pin_count = 0;
reg->obj = NULL;
INIT_LIST_HEAD(&reg->lru_list);
}
 
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
}
 
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring;
int i;
 
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);
 
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
list_for_each_entry(obj,
&dev_priv->mm.inactive_list,
mm_list)
{
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
}
 
/* The fence registers are invalidated so clear them out */
i915_gem_reset_fences(dev);
}
 
 
 
/**
* This function clears the request list as sequence numbers are passed.
*/
static void
void
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
1008,7 → 1762,7
 
WARN_ON(i915_verify_lists(ring->dev));
 
seqno = ring->get_seqno(ring);
seqno = ring->get_seqno(ring, true);
 
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
if (seqno >= ring->sync_seqno[i])
1025,6 → 1779,12
break;
 
trace_i915_gem_request_retire(ring, request->seqno);
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* of the tail of the request to update the last known position
* of the GPU head.
*/
ring->last_retired_head = request->tail;
 
list_del(&request->list);
kfree(request);
1040,12 → 1800,9
struct drm_i915_gem_object,
ring_list);
 
if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
if (!i915_seqno_passed(seqno, obj->last_read_seqno))
break;
 
if (obj->base.write_domain != 0)
i915_gem_object_move_to_flushing(obj);
else
i915_gem_object_move_to_inactive(obj);
}
 
1062,31 → 1819,19
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int i;
 
if (!list_empty(&dev_priv->mm.deferred_free_list)) {
struct drm_i915_gem_object *obj, *next;
 
/* We must be careful that during unbind() we do not
* accidentally infinitely recurse into retire requests.
* Currently:
* retire -> free -> unbind -> wait -> retire_ring
*/
list_for_each_entry_safe(obj, next,
&dev_priv->mm.deferred_free_list,
mm_list)
i915_gem_free_object_tail(obj);
for_each_ring(ring, dev_priv, i)
i915_gem_retire_requests_ring(ring);
}
 
for (i = 0; i < I915_NUM_RINGS; i++)
i915_gem_retire_requests_ring(&dev_priv->ring[i]);
}
 
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
struct drm_device *dev;
struct intel_ring_buffer *ring;
bool idle;
int i;
 
1109,26 → 1854,17
* objects indefinitely.
*/
idle = true;
for (i = 0; i < I915_NUM_RINGS; i++) {
struct intel_ring_buffer *ring = &dev_priv->ring[i];
for_each_ring(ring, dev_priv, i) {
if (ring->gpu_caches_dirty)
i915_add_request(ring, NULL, NULL);
 
if (!list_empty(&ring->gpu_write_list)) {
struct drm_i915_gem_request *request;
int ret;
 
ret = i915_gem_flush_ring(ring,
0, I915_GEM_GPU_DOMAINS);
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (ret || request == NULL ||
i915_add_request(ring, NULL, request))
kfree(request);
}
 
idle &= list_empty(&ring->request_list);
}
 
if (!dev_priv->mm.suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
if (idle)
intel_mark_idle(dev);
 
mutex_unlock(&dev->struct_mutex);
// LEAVE();
1135,121 → 1871,76
}
 
/**
* Waits for a sequence number to be signaled, and cleans up the
* request and object lists appropriately for that event.
* Ensures that an object will eventually get non-busy by flushing any required
* write domains, emitting any outstanding lazy request and retiring and
* completed requests.
*/
int
i915_wait_request(struct intel_ring_buffer *ring,
uint32_t seqno)
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 ier;
int ret = 0;
int ret;
 
BUG_ON(seqno == 0);
 
// if (atomic_read(&dev_priv->mm.wedged)) {
// struct completion *x = &dev_priv->error_completion;
// bool recovery_complete;
// unsigned long flags;
 
/* Give the error handler a chance to run. */
// spin_lock_irqsave(&x->wait.lock, flags);
// recovery_complete = x->done > 0;
// spin_unlock_irqrestore(&x->wait.lock, flags);
//
// return recovery_complete ? -EIO : -EAGAIN;
// }
 
if (seqno == ring->outstanding_lazy_request) {
struct drm_i915_gem_request *request;
 
request = kzalloc(sizeof(*request), GFP_KERNEL);
if (request == NULL)
return -ENOMEM;
 
ret = i915_add_request(ring, NULL, request);
if (ret) {
kfree(request);
if (obj->active) {
ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
if (ret)
return ret;
}
 
seqno = request->seqno;
i915_gem_retire_requests_ring(obj->ring);
}
 
if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
if (HAS_PCH_SPLIT(ring->dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
ier = I915_READ(IER);
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
"interrupts, re-enabling\n");
// ring->dev->driver->irq_preinstall(ring->dev);
// ring->dev->driver->irq_postinstall(ring->dev);
return 0;
}
 
trace_i915_gem_request_wait_begin(ring, seqno);
 
ring->waiting_seqno = seqno;
if (ring->irq_get(ring)) {
// printf("enter wait\n");
wait_event(ring->irq_queue,
i915_seqno_passed(ring->get_seqno(ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
 
ring->irq_put(ring);
} else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
seqno) ||
atomic_read(&dev_priv->mm.wedged), 3000))
ret = -EBUSY;
ring->waiting_seqno = 0;
 
trace_i915_gem_request_wait_end(ring, seqno);
}
if (atomic_read(&dev_priv->mm.wedged))
ret = -EAGAIN;
 
if (ret && ret != -ERESTARTSYS)
DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
__func__, ret, seqno, ring->get_seqno(ring),
dev_priv->next_seqno);
 
/* Directly dispatch request retiring. While we have the work queue
* to handle this, the waiter on a request often wants an associated
* buffer to have made it to the inactive list, and we would need
* a separate wait queue to handle that.
*/
if (ret == 0)
i915_gem_retire_requests_ring(ring);
 
return ret;
}
 
 
 
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
* i915_gem_object_sync - sync an object to a ring.
*
* @obj: object which may be in use on another ring.
* @to: ring we wish to use the object on. May be NULL.
*
* This code is meant to abstract object synchronization with the GPU.
* Calling with NULL implies synchronizing the object with the CPU
* rather than a particular GPU ring.
*
* Returns 0 if successful, else propagates up the lower layer error.
*/
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
{
int ret;
struct intel_ring_buffer *from = obj->ring;
u32 seqno;
int ret, idx;
 
/* This function only exists to support waiting for existing rendering,
* not for emitting required flushes.
*/
BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
if (from == NULL || to == from)
return 0;
 
/* If there is rendering queued on the buffer being evicted, wait for
* it.
*/
if (obj->active) {
ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
return i915_gem_object_wait_rendering(obj, false);
 
idx = intel_ring_sync_index(from, to);
 
seqno = obj->last_read_seqno;
if (seqno <= from->sync_seqno[idx])
return 0;
 
ret = i915_gem_check_olr(obj->ring, seqno);
if (ret)
return ret;
}
 
return 0;
ret = to->sync_to(to, from, seqno);
if (!ret)
from->sync_seqno[idx] = seqno;
 
return ret;
}
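The sync_seqno[] cache consulted above is what makes repeated inter-ring synchronization cheap; a short worked illustration (seqno values hypothetical):
/* Example: obj was last read as seqno 120 on ring "from", and
 * from->sync_seqno[idx] for the target ring "to" is currently 100.
 *   120 > 100, so emit to->sync_to(to, from, 120) and cache 120.
 *   Any later sync of an object with last_read_seqno <= 120 between the
 *   same pair of rings returns immediately -- "to" already waits far
 *   enough. */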
 
static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
1282,18 → 1973,19
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret = 0;
 
if (obj->gtt_space == NULL)
return 0;
 
if (obj->pin_count != 0) {
DRM_ERROR("Attempting to unbind pinned buffer\n");
return -EINVAL;
}
if (obj->pin_count)
return -EBUSY;
 
BUG_ON(obj->pages.page == NULL);
 
ret = i915_gem_object_finish_gpu(obj);
if (ret == -ERESTARTSYS)
if (ret)
return ret;
/* Continue on if we fail due to EIO, the GPU is hung so we
* should be safe and we need to cleanup or else we might
1302,34 → 1994,23
 
i915_gem_object_finish_gtt(obj);
 
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it.
*/
if (ret == 0)
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret == -ERESTARTSYS)
return ret;
if (ret) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
i915_gem_clflush_object(obj);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
 
/* release the fence reg _after_ flushing */
ret = i915_gem_object_put_fence(obj);
if (ret == -ERESTARTSYS)
if (ret)
return ret;
 
trace_i915_gem_object_unbind(obj);
 
if (obj->has_global_gtt_mapping)
i915_gem_gtt_unbind_object(obj);
i915_gem_object_put_pages_gtt(obj);
if (obj->has_aliasing_ppgtt_mapping) {
i915_ppgtt_unbind_object(dev_priv->mm.aliasing_ppgtt, obj);
obj->has_aliasing_ppgtt_mapping = 0;
}
i915_gem_gtt_finish_object(obj);
 
list_del_init(&obj->gtt_list);
list_del_init(&obj->mm_list);
list_del(&obj->mm_list);
list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
 
1337,121 → 2018,210
obj->gtt_space = NULL;
obj->gtt_offset = 0;
 
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
 
return ret;
}
 
int
i915_gem_flush_ring(struct intel_ring_buffer *ring,
uint32_t invalidate_domains,
uint32_t flush_domains)
{
int ret;
 
if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
return 0;
 
trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
 
ret = ring->flush(ring, invalidate_domains, flush_domains);
if (ret)
return ret;
 
if (flush_domains & I915_GEM_GPU_DOMAINS)
i915_gem_process_flushing_list(ring, flush_domains);
 
return 0;
}
 
static int i915_ring_idle(struct intel_ring_buffer *ring)
{
int ret;
 
if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
if (list_empty(&ring->active_list))
return 0;
 
if (!list_empty(&ring->gpu_write_list)) {
ret = i915_gem_flush_ring(ring,
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}
 
return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
}
 
int
i915_gpu_idle(struct drm_device *dev)
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int ret, i;
 
/* Flush everything onto the inactive list. */
for (i = 0; i < I915_NUM_RINGS; i++) {
ret = i915_ring_idle(&dev_priv->ring[i]);
for_each_ring(ring, dev_priv, i) {
ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
if (ret)
return ret;
 
ret = i915_ring_idle(ring);
if (ret)
return ret;
}
 
return 0;
}
 
static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint64_t val;
 
if (obj) {
u32 size = obj->gtt_space->size;
 
val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
val |= obj->gtt_offset & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) <<
SANDYBRIDGE_FENCE_PITCH_SHIFT;
 
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
} else
val = 0;
 
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
}
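/* Worked example for the encoding above (illustrative; assumes
 * SANDYBRIDGE_FENCE_PITCH_SHIFT == 32 and I965_FENCE_REG_VALID == bit 0):
 * an X-tiled object at gtt_offset = 0x00100000, size = 0x00010000,
 * stride = 4096:
 *
 *   end bits = (0x00100000 + 0x00010000 - 4096) & 0xfffff000 = 0x0010f000
 *   pitch    = (4096 / 128) - 1                              = 0x1f
 *   val      = 0x0010f01f00100001
 *
 * i.e. the upper dword holds the last page and the pitch, the lower dword
 * the first page and the valid bit.
 */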
 
static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint64_t val;
 
if (obj) {
u32 size = obj->gtt_space->size;
 
val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
val |= obj->gtt_offset & 0xfffff000;
val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
} else
val = 0;
 
I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
POSTING_READ(FENCE_REG_965_0 + reg * 8);
}
 
static void i915_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 val;
 
if (obj) {
u32 size = obj->gtt_space->size;
int pitch_val;
int tile_width;
 
WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
(size & -size) != size ||
(obj->gtt_offset & (size - 1)),
"object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
obj->gtt_offset, obj->map_and_fenceable, size);
 
if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
else
tile_width = 512;
 
/* Note: pitch better be a power of two tile widths */
pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
 
val = obj->gtt_offset;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
} else
val = 0;
 
if (reg < 8)
reg = FENCE_REG_830_0 + reg * 4;
else
reg = FENCE_REG_945_8 + (reg - 8) * 4;
 
I915_WRITE(reg, val);
POSTING_READ(reg);
}
 
static void i830_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t val;
 
if (obj) {
u32 size = obj->gtt_space->size;
uint32_t pitch_val;
 
WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
(size & -size) != size ||
(obj->gtt_offset & (size - 1)),
"object 0x%08x not 512K or pot-size 0x%08x aligned\n",
obj->gtt_offset, size);
 
pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
 
static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
val = obj->gtt_offset;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I830_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
} else
val = 0;
 
I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
POSTING_READ(FENCE_REG_830_0 + reg * 4);
}
 
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
return i915_seqno_passed(ring->get_seqno(ring), seqno);
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
case 5:
case 4: i965_write_fence_reg(dev, reg, obj); break;
case 3: i915_write_fence_reg(dev, reg, obj); break;
case 2: i830_write_fence_reg(dev, reg, obj); break;
default: break;
}
}
 
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined)
static inline int fence_number(struct drm_i915_private *dev_priv,
struct drm_i915_fence_reg *fence)
{
int ret;
 
if (obj->fenced_gpu_access) {
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->last_fenced_ring,
0, obj->base.write_domain);
if (ret)
return ret;
return fence - dev_priv->fence_regs;
}
 
obj->fenced_gpu_access = false;
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int reg = fence_number(dev_priv, fence);
 
i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
 
if (enable) {
obj->fence_reg = reg;
fence->obj = obj;
list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
} else {
obj->fence_reg = I915_FENCE_REG_NONE;
fence->obj = NULL;
list_del_init(&fence->lru_list);
}
}
 
if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
if (!ring_passed_seqno(obj->last_fenced_ring,
obj->last_fenced_seqno)) {
ret = i915_wait_request(obj->last_fenced_ring,
obj->last_fenced_seqno);
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
{
if (obj->last_fenced_seqno) {
int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
if (ret)
return ret;
}
 
obj->last_fenced_seqno = 0;
obj->last_fenced_ring = NULL;
}
 
/* Ensure that all CPU reads are completed before installing a fence
1460,6 → 2230,7
if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
mb();
 
obj->fenced_gpu_access = false;
return 0;
}
 
1466,95 → 2237,189
int
i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
 
// if (obj->tiling_mode)
// i915_gem_release_mmap(obj);
 
ret = i915_gem_object_flush_fence(obj, NULL);
ret = i915_gem_object_flush_fence(obj);
if (ret)
return ret;
 
if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
i915_gem_clear_fence_reg(obj->base.dev,
&dev_priv->fence_regs[obj->fence_reg]);
if (obj->fence_reg == I915_FENCE_REG_NONE)
return 0;
 
obj->fence_reg = I915_FENCE_REG_NONE;
}
i915_gem_object_update_fence(obj,
&dev_priv->fence_regs[obj->fence_reg],
false);
i915_gem_object_fence_lost(obj);
 
return 0;
}
 
static struct drm_i915_fence_reg *
i915_find_fence_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_fence_reg *reg, *avail;
int i;
 
/* First try to find a free reg */
avail = NULL;
for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
reg = &dev_priv->fence_regs[i];
if (!reg->obj)
return reg;
 
if (!reg->pin_count)
avail = reg;
}
 
if (avail == NULL)
return NULL;
 
/* None available, try to steal one or wait for a user to finish */
list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
if (reg->pin_count)
continue;
 
return reg;
}
 
return NULL;
}
 
/**
* i915_gem_object_get_fence - set up fencing for an object
* @obj: object to map through a fence reg
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
* This function walks the fence regs looking for a free one for @obj,
* stealing one if it can't find any.
*
* It then sets up the reg based on the object's properties: address, pitch
* and tiling format.
*
* For an untiled surface, this removes any existing fence.
*/
int
i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool enable = obj->tiling_mode != I915_TILING_NONE;
struct drm_i915_fence_reg *reg;
int ret;
 
/* Have we updated the tiling parameters upon the object and so
* will need to serialise the write to the associated fence register?
*/
if (obj->fence_dirty) {
ret = i915_gem_object_flush_fence(obj);
if (ret)
return ret;
}
 
/* Just update our place in the LRU if our fence is getting reused. */
if (obj->fence_reg != I915_FENCE_REG_NONE) {
reg = &dev_priv->fence_regs[obj->fence_reg];
if (!obj->fence_dirty) {
list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list);
return 0;
}
} else if (enable) {
reg = i915_find_fence_reg(dev);
if (reg == NULL)
return -EDEADLK;
 
if (reg->obj) {
struct drm_i915_gem_object *old = reg->obj;
 
ret = i915_gem_object_flush_fence(old);
if (ret)
return ret;
 
i915_gem_object_fence_lost(old);
}
} else
return 0;
 
i915_gem_object_update_fence(obj, reg, enable);
obj->fence_dirty = false;
 
return 0;
}
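/* Caller pattern sketch (hypothetical; a fence is only installed for tiled
 * objects, per "enable" above):
 *
 *     ret = i915_gem_object_pin(obj, 4096, true, false);
 *     if (ret)
 *         return ret;
 *     ret = i915_gem_object_get_fence(obj);
 *     if (ret == 0) {
 *         ... fenced GTT access, detiling handled by the hardware ...
 *     }
 *     i915_gem_object_unpin(obj);
 */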
 
static bool i915_gem_valid_gtt_space(struct drm_device *dev,
struct drm_mm_node *gtt_space,
unsigned long cache_level)
{
struct drm_mm_node *other;
 
/* On non-LLC machines we have to be careful when putting differing
* types of snoopable memory together to avoid the prefetcher
* crossing memory domains and dying.
*/
if (HAS_LLC(dev))
return true;
 
if (gtt_space == NULL)
return true;
 
if (list_empty(&gtt_space->node_list))
return true;
 
other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
if (other->allocated && !other->hole_follows && other->color != cache_level)
return false;
 
other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
return false;
 
return true;
}
 
 
 
 
 
 
 
 
 
/**
* i915_gem_clear_fence_reg - clear out fence register info
* @obj: object to clear
*
* Zeroes out the fence register itself and clears out the associated
* data structures in dev_priv and obj.
*/
static void
i915_gem_clear_fence_reg(struct drm_device *dev,
struct drm_i915_fence_reg *reg)
static void i915_gem_verify_gtt(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t fence_reg = reg - dev_priv->fence_regs;
#if WATCH_GTT
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int err = 0;
 
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
break;
case 5:
case 4:
I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
break;
case 3:
if (fence_reg >= 8)
fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
else
case 2:
fence_reg = FENCE_REG_830_0 + fence_reg * 4;
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
if (obj->gtt_space == NULL) {
printk(KERN_ERR "object found on GTT list with no space reserved\n");
err++;
continue;
}
 
I915_WRITE(fence_reg, 0);
break;
if (obj->cache_level != obj->gtt_space->color) {
printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
obj->gtt_space->start,
obj->gtt_space->start + obj->gtt_space->size,
obj->cache_level,
obj->gtt_space->color);
err++;
continue;
}
 
list_del_init(&reg->lru_list);
reg->obj = NULL;
reg->setup_seqno = 0;
if (!i915_gem_valid_gtt_space(dev,
obj->gtt_space,
obj->cache_level)) {
printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
obj->gtt_space->start,
obj->gtt_space->start + obj->gtt_space->size,
obj->cache_level);
err++;
continue;
}
}
 
WARN_ON(err);
#endif
}
 
/**
* Finds free space in the GTT aperture and binds the object there.
*/
1561,12 → 2426,12
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment,
bool map_and_fenceable)
bool map_and_fenceable,
bool nonblocking)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_mm_node *free_space;
gfp_t gfpmask = 0; //__GFP_NORETRY | __GFP_NOWARN;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
int ret;
1606,32 → 2471,36
return -E2BIG;
}
 
ret = i915_gem_object_get_pages(obj);
if (ret)
return ret;
 
search_free:
if (map_and_fenceable)
free_space =
drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
size, alignment, 0,
dev_priv->mm.gtt_mappable_end,
0);
drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
false);
else
free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
size, alignment, 0);
free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
size, alignment, obj->cache_level,
false);
 
if (free_space != NULL) {
if (map_and_fenceable)
obj->gtt_space =
drm_mm_get_block_range_generic(free_space,
size, alignment, 0,
dev_priv->mm.gtt_mappable_end,
0);
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
false);
else
obj->gtt_space =
drm_mm_get_block(free_space, size, alignment);
drm_mm_get_block_generic(free_space,
size, alignment, obj->cache_level,
false);
}
if (obj->gtt_space == NULL) {
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
ret = 1; //i915_gem_evict_something(dev, size, alignment,
// map_and_fenceable);
if (ret)
1639,53 → 2508,28
 
goto search_free;
}
 
ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
if (ret) {
if (WARN_ON(!i915_gem_valid_gtt_space(dev,
obj->gtt_space,
obj->cache_level))) {
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
#if 0
if (ret == -ENOMEM) {
/* first try to reclaim some memory by clearing the GTT */
ret = i915_gem_evict_everything(dev, false);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
gfpmask = 0;
goto search_free;
return -EINVAL;
}
 
return -ENOMEM;
}
 
goto search_free;
}
#endif
return ret;
}
 
ret = i915_gem_gtt_bind_object(obj);
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
i915_gem_object_put_pages_gtt(obj);
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
 
// if (i915_gem_evict_everything(dev, false))
return ret;
 
// goto search_free;
}
 
list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
if (!dev_priv->mm.aliasing_ppgtt)
i915_gem_gtt_bind_object(obj, obj->cache_level);
 
list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
obj->gtt_offset = obj->gtt_space->start;
 
fenceable =
1698,6 → 2542,7
obj->map_and_fenceable = mappable && fenceable;
 
trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
}
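/* Note on the drm_mm "color" argument threaded through the searches above:
 * the object's cache_level doubles as the allocation color, letting drm_mm
 * (together with i915_gem_valid_gtt_space()) keep differently-snooped
 * objects from sitting adjacent without a guard hole on non-LLC machines. */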
 
1708,7 → 2553,7
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
if (obj->pages == NULL)
if (obj->pages.page == NULL)
return;
 
/* If the GPU is snooping the contents of the CPU cache,
1740,13 → 2585,13
page_virtual = AllocKernelSpace(obj->base.size);
if(page_virtual != NULL)
{
u32_t *src, *dst;
dma_addr_t *src, *dst;
u32 count;
 
#define page_tabs 0xFDC00000 /* really dirty hack */
 
src = (u32_t*)obj->pages;
dst = &((u32_t*)page_tabs)[(u32_t)page_virtual >> 12];
src = obj->pages.page;
dst = &((dma_addr_t*)page_tabs)[(u32_t)page_virtual >> 12];
count = obj->base.size/4096;
 
while(count--)
1770,17 → 2615,6
}
}
 
/** Flushes any GPU write domain for the object if it's dirty. */
static int
i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
return 0;
 
/* Queue the GPU write cache flushing we need. */
return i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
}
 
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
1836,6 → 2670,7
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
uint32_t old_write_domain, old_read_domains;
int ret;
 
1846,16 → 2681,10
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
return 0;
 
ret = i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
 
if (obj->pending_gpu_write || write) {
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}
 
i915_gem_object_flush_cpu_write_domain(obj);
 
old_write_domain = obj->base.write_domain;
1876,6 → 2705,10
old_read_domains,
old_write_domain);
 
/* And bump the LRU for this access */
if (i915_gem_object_is_inactive(obj))
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
return 0;
}
 
1882,6 → 2715,8
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
 
if (obj->cache_level == cache_level)
1892,6 → 2727,12
return -EBUSY;
}
 
if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
}
 
if (obj->gtt_space) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
1903,13 → 2744,19
* registers with snooped memory, so relinquish any fences
* currently pointing to our region in the aperture.
*/
if (INTEL_INFO(obj->base.dev)->gen < 6) {
if (INTEL_INFO(dev)->gen < 6) {
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
}
 
i915_gem_gtt_rebind_object(obj, cache_level);
if (obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(obj, cache_level);
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
 
obj->gtt_space->color = cache_level;
}
 
if (cache_level == I915_CACHE_NONE) {
1936,6 → 2783,7
}
 
obj->cache_level = cache_level;
i915_gem_verify_gtt(dev);
return 0;
}
 
1943,11 → 2791,6
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
* any flushes to be pipelined (for pageflips).
*
* For the display plane, we want to be in the GTT but out of any write
* domains. So in many ways this looks like set_to_gtt_domain() apart from the
* ability to pipeline the waits, pinning and any additional subtleties
* that may differentiate the display plane from ordinary buffers.
*/
int
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
1957,14 → 2800,10
u32 old_read_domains, old_write_domain;
int ret;
 
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (pipelined != obj->ring) {
ret = i915_gem_object_sync(obj, pipelined);
if (ret)
return ret;
 
if (pipelined != obj->ring) {
ret = i915_gem_object_wait_rendering(obj);
if (ret == -ERESTARTSYS)
return ret;
}
 
/* The display engine is not coherent with the LLC cache on gen6. As
1984,7 → 2823,7
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
ret = i915_gem_object_pin(obj, alignment, true);
ret = i915_gem_object_pin(obj, alignment, true, false);
if (ret)
return ret;
 
1996,7 → 2835,7
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.write_domain = 0;
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
trace_i915_gem_object_change_domain(obj,
2014,16 → 2853,13
if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
return 0;
 
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
ret = i915_gem_flush_ring(obj->ring, 0, obj->base.write_domain);
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
}
 
/* Ensure that we invalidate the GPU's caches and TLBs. */
obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 
return i915_gem_object_wait_rendering(obj);
return 0;
}
 
/**
2032,7 → 2868,7
* This function returns when the move is complete, including waiting on
* flushes to occur.
*/
static int
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
2041,17 → 2877,12
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
return 0;
 
ret = i915_gem_object_flush_gpu_write_domain(obj);
ret = i915_gem_object_wait_rendering(obj, !write);
if (ret)
return ret;
 
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
 
i915_gem_object_flush_gtt_write_domain(obj);
 
 
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
 
2082,67 → 2913,62
return 0;
}
 
/**
* Moves the object from a partially CPU read to a full one.
#if 0
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
* Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
* and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
* Note that if we were to use the current jiffies each time around the loop,
* we wouldn't escape the function with any frames outstanding if the time to
* render a frame was over 20ms.
*
* This should get us reasonable parallelism between CPU and GPU but also
* relatively low latency when blocking on a particular request to finish.
*/
static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
if (!obj->page_cpu_valid)
return;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = GetTimerTics() - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
struct intel_ring_buffer *ring = NULL;
u32 seqno = 0;
int ret;
 
/* If we're partially in the CPU read domain, finish moving it in.
*/
if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
}
if (atomic_read(&dev_priv->mm.wedged))
return -EIO;
 
/* Free the page_cpu_valid mappings which are now stale, whether
* or not we've got I915_GEM_DOMAIN_CPU.
*/
kfree(obj->page_cpu_valid);
obj->page_cpu_valid = NULL;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
 
ring = request->ring;
seqno = request->seqno;
}
spin_unlock(&file_priv->mm.lock);
 
if (seqno == 0)
return 0;
 
ret = __wait_seqno(ring, seqno, true, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
 
int gem_object_lock(struct drm_i915_gem_object *obj)
{
return i915_gem_object_set_to_cpu_domain(obj, true);
return ret;
}
#endif
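/* The 20 msec cutoff above relies on msecs_to_jiffies() from the newly added
 * ddk/linux/time.c; its HZ_TO_MSEC tables imply HZ == 100 (one jiffy per
 * 10 ms), matching the 10 ms system ticks returned by GetTimerTics(). A
 * minimal sketch of the cutoff arithmetic under that assumption:
 *
 *     unsigned long recent_enough;
 *
 *     recent_enough = GetTimerTics() - msecs_to_jiffies(20);  // now - 2 ticks
 */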
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable)
bool map_and_fenceable,
bool nonblocking)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
WARN_ON(i915_verify_lists(dev));
if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
 
#if 0
if (obj->gtt_space != NULL) {
2164,19 → 2990,18
 
if (obj->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment,
map_and_fenceable);
map_and_fenceable,
nonblocking);
if (ret)
return ret;
}
 
if (obj->pin_count++ == 0) {
if (!obj->active)
list_move_tail(&obj->mm_list,
&dev_priv->mm.pinned_list);
}
if (!obj->has_global_gtt_mapping && map_and_fenceable)
i915_gem_gtt_bind_object(obj, obj->cache_level);
 
obj->pin_count++;
obj->pin_mappable |= map_and_fenceable;
 
WARN_ON(i915_verify_lists(dev));
return 0;
}
 
2183,56 → 3008,223
void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
 
WARN_ON(i915_verify_lists(dev));
BUG_ON(obj->pin_count == 0);
BUG_ON(obj->gtt_space == NULL);
 
if (--obj->pin_count == 0) {
if (!obj->active)
list_move_tail(&obj->mm_list,
&dev_priv->mm.inactive_list);
if (--obj->pin_count == 0)
obj->pin_mappable = false;
}
WARN_ON(i915_verify_lists(dev));
 
#if 0
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
struct drm_i915_gem_object *obj;
int ret;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
 
if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
ret = -EINVAL;
goto out;
}
 
if (obj->pin_filp != NULL && obj->pin_filp != file) {
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
}
 
obj->user_pin_count++;
obj->pin_filp = file;
if (obj->user_pin_count == 1) {
ret = i915_gem_object_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
 
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
i915_gem_object_flush_cpu_write_domain(obj);
args->offset = obj->gtt_offset;
out:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
 
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
struct drm_i915_gem_object *obj;
int ret;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
 
if (obj->pin_filp != file) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
}
obj->user_pin_count--;
if (obj->user_pin_count == 0) {
obj->pin_filp = NULL;
i915_gem_object_unpin(obj);
}
 
out:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
 
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_busy *args = data;
struct drm_i915_gem_object *obj;
int ret;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
 
/* Count all active objects as busy, even if they are currently not used
* by the gpu. Users of this interface expect objects to eventually
* become non-busy without any further actions, therefore emit any
* necessary flushes here.
*/
ret = i915_gem_object_flush_active(obj);
 
args->busy = obj->active;
if (obj->ring) {
BUILD_BUG_ON(I915_NUM_RINGS > 16);
args->busy |= intel_ring_flag(obj->ring) << 16;
}
 
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
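/* Layout of args->busy as filled in above: bit 0 reports obj->active, and
 * the ring flag (1 << ring->id, per intel_ring_flag()) lands in bits 16+,
 * which is what the BUILD_BUG_ON(I915_NUM_RINGS > 16) guards. */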
 
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
return i915_gem_ring_throttle(dev, file_priv);
}
 
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
int ret;
 
switch (args->madv) {
case I915_MADV_DONTNEED:
case I915_MADV_WILLNEED:
break;
default:
return -EINVAL;
}
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
 
if (obj->pin_count) {
ret = -EINVAL;
goto out;
}
 
if (obj->madv != __I915_MADV_PURGED)
obj->madv = args->madv;
 
/* if the object is no longer attached, discard its backing storage */
if (i915_gem_object_is_purgeable(obj) && obj->pages.page == NULL)
i915_gem_object_truncate(obj);
 
args->retained = obj->madv != __I915_MADV_PURGED;
 
out:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
#endif
 
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
INIT_LIST_HEAD(&obj->mm_list);
INIT_LIST_HEAD(&obj->gtt_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
 
obj->ops = ops;
 
obj->fence_reg = I915_FENCE_REG_NONE;
obj->madv = I915_MADV_WILLNEED;
/* Avoid an unnecessary call to unbind on the first bind. */
obj->map_and_fenceable = true;
 
i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
}
 
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.get_pages = i915_gem_object_get_pages_gtt,
.put_pages = i915_gem_object_put_pages_gtt,
};
 
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct address_space *mapping;
u32 mask;
 
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
2244,13 → 3236,13
}
 
 
i915_gem_info_add_obj(dev_priv, size);
i915_gem_object_init(obj, &i915_gem_object_ops);
 
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
if (IS_GEN6(dev) || IS_GEN7(dev)) {
/* On Gen6, we can have the GPU use the LLC (the CPU
if (HAS_LLC(dev)) {
/* On some devices, we can have the GPU use the LLC (the CPU
* cache) for about a 10% performance improvement
* compared to uncached. Graphics requests other than
* display scanout are coherent with the CPU in
2266,17 → 3258,6
} else
obj->cache_level = I915_CACHE_NONE;
 
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj->mm_list);
INIT_LIST_HEAD(&obj->gtt_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
INIT_LIST_HEAD(&obj->gpu_write_list);
obj->madv = I915_MADV_WILLNEED;
/* Avoid an unnecessary call to unbind on the first bind. */
obj->map_and_fenceable = true;
 
return obj;
}
 
2287,63 → 3268,234
return 0;
}
 
static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
 
ret = i915_gem_object_unbind(obj);
if (ret == -ERESTARTSYS) {
list_move(&obj->mm_list,
&dev_priv->mm.deferred_free_list);
return;
trace_i915_gem_object_destroy(obj);
 
// if (obj->phys_obj)
// i915_gem_detach_phys_object(dev, obj);
 
obj->pin_count = 0;
if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
bool was_interruptible;
 
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
 
WARN_ON(i915_gem_object_unbind(obj));
 
dev_priv->mm.interruptible = was_interruptible;
}
 
trace_i915_gem_object_destroy(obj);
obj->pages_pin_count = 0;
i915_gem_object_put_pages(obj);
// i915_gem_object_free_mmap_offset(obj);
 
// if (obj->base.map_list.map)
// drm_gem_free_mmap_offset(&obj->base);
BUG_ON(obj->pages.page);
 
// if (obj->base.import_attach)
// drm_prime_gem_destroy(&obj->base, NULL);
 
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
kfree(obj->page_cpu_valid);
kfree(obj->bit_17);
kfree(obj);
}
 
void i915_gem_free_object(struct drm_gem_object *gem_obj)
#if 0
int
i915_gem_idle(struct drm_device *dev)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
 
while (obj->pin_count > 0)
i915_gem_object_unpin(obj);
mutex_lock(&dev->struct_mutex);
 
// if (obj->phys_obj)
// i915_gem_detach_phys_object(dev, obj);
if (dev_priv->mm.suspended) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
 
i915_gem_free_object_tail(obj);
ret = i915_gpu_idle(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
i915_gem_retire_requests(dev);
 
i915_gem_reset_fences(dev);
 
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
* And not confound mm.suspended!
*/
dev_priv->mm.suspended = 1;
del_timer_sync(&dev_priv->hangcheck_timer);
 
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
 
mutex_unlock(&dev->struct_mutex);
 
/* Cancel the retire work handler, which should be idle now. */
// cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
return 0;
}
#endif
 
void i915_gem_l3_remap(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 misccpctl;
int i;
 
if (!IS_IVYBRIDGE(dev))
return;
 
if (!dev_priv->mm.l3_remap_info)
return;
 
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
POSTING_READ(GEN7_MISCCPCTL);
 
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
DRM_DEBUG("0x%x was already programmed to %x\n",
GEN7_L3LOG_BASE + i, remap);
if (remap && !dev_priv->mm.l3_remap_info[i/4])
DRM_DEBUG_DRIVER("Clearing remapped register\n");
I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
}
 
/* Make sure all the writes land before disabling dop clock gating */
POSTING_READ(GEN7_L3LOG_BASE);
 
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
 
void i915_gem_init_swizzling(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
 
if (INTEL_INFO(dev)->gen < 5 ||
dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
return;
 
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_TILE_SURFACE_SWIZZLING);
 
if (IS_GEN5(dev))
return;
 
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
if (IS_GEN6(dev))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
else
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
 
void i915_gem_init_ppgtt(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
uint32_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
 
if (!dev_priv->mm.aliasing_ppgtt)
return;
 
 
pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
 
if (dev_priv->mm.gtt->needs_dmar)
pt_addr = ppgtt->pt_dma_addr[i];
else
pt_addr = ppgtt->pt_pages[i];
 
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
 
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
 
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
 
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk, gab_ctl, ecobits;
 
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
 
gab_ctl = I915_READ(GAB_CTL);
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
 
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
 
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
}
}
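/* Illustrative arithmetic for the PP_DIR_BASE encoding above: if the PDEs
 * start at GTT byte offset 0x001ff000, then
 *
 *   pd_offset /= 64   ->  0x7fc0      (cacheline index, 64 bytes each)
 *   pd_offset <<= 16  ->  0x7fc00000  (value written to RING_PP_DIR_BASE)
 */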
 
static bool
intel_enable_blt(struct drm_device *dev)
{
if (!HAS_BLT(dev))
return false;
 
/* The blitter was dysfunctional on early prototypes */
if (IS_GEN6(dev) && dev->pdev->revision < 8) {
DRM_INFO("BLT not supported on this pre-production hardware;"
" graphics performance will be degraded.\n");
return false;
}
 
return true;
}
 
int
i915_gem_init_ringbuffer(struct drm_device *dev)
i915_gem_init_hw(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
 
if (!intel_enable_gtt())
return -EIO;
 
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
 
i915_gem_l3_remap(dev);
 
i915_gem_init_swizzling(dev);
 
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
2354,7 → 3506,7
goto cleanup_render_ring;
}
 
if (HAS_BLT(dev)) {
if (intel_enable_blt(dev)) {
ret = intel_init_blt_ring_buffer(dev);
if (ret)
goto cleanup_bsd_ring;
2362,6 → 3514,13
 
dev_priv->next_seqno = 1;
 
/*
* XXX: There was some w/a described somewhere suggesting loading
* contexts before PPGTT.
*/
i915_gem_context_init(dev);
i915_gem_init_ppgtt(dev);
 
return 0;
 
cleanup_bsd_ring:
2371,23 → 3530,88
return ret;
}
 
#if 0
static bool
intel_enable_ppgtt(struct drm_device *dev)
{
if (i915_enable_ppgtt >= 0)
return i915_enable_ppgtt;
 
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
return false;
#endif
 
return true;
}
 
#define LFB_SIZE 0xC00000
 
int i915_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long gtt_size, mappable_size;
int ret;
 
gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
mutex_lock(&dev->struct_mutex);
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
 
i915_gem_init_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size - LFB_SIZE);
 
ret = i915_gem_init_aliasing_ppgtt(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
} else {
/* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch
* page. There are a number of places where the hardware
* apparently prefetches past the end of the object, and we've
* seen multiple hangs with the GPU head pointer stuck in a
* batchbuffer bound at the last page of the aperture. One page
* should be enough to keep any prefetching inside of the
* aperture.
*/
i915_gem_init_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size - LFB_SIZE);
}
 
ret = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (ret) {
i915_gem_cleanup_aliasing_ppgtt(dev);
return ret;
}
 
return 0;
}
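/* Sizing sketch for the aliasing-ppgtt branch above (illustrative; assumes
 * I915_PPGTT_PD_ENTRIES == 512 and I915_PPGTT_PT_ENTRIES == 1024):
 *
 *   stolen GTT entries: 512 PDE slots -> gtt_size shrinks by 512 * 4 KiB = 2 MiB
 *   PPGTT coverage:     512 PDs * 1024 PTEs * 4 KiB = 2 GiB of address space
 *
 * LFB_SIZE (0xC00000, 12 MiB) is kept out of the GEM-managed range at the
 * start of the aperture, presumably for the KolibriOS linear framebuffer.
 */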
 
void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int i;
 
for (i = 0; i < I915_NUM_RINGS; i++)
intel_cleanup_ring_buffer(&dev_priv->ring[i]);
for_each_ring(ring, dev_priv, i)
intel_cleanup_ring_buffer(ring);
}
 
#if 0
 
int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret, i;
int ret;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
2400,7 → 3624,7
mutex_lock(&dev->struct_mutex);
dev_priv->mm.suspended = 0;
 
ret = i915_gem_init_ringbuffer(dev);
ret = i915_gem_init_hw(dev);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
2407,12 → 3631,6
}
 
BUG_ON(!list_empty(&dev_priv->mm.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
for (i = 0; i < I915_NUM_RINGS; i++) {
BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
}
mutex_unlock(&dev->struct_mutex);
 
ret = drm_irq_install(dev);
2460,7 → 3678,6
{
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->gpu_write_list);
}
 
void
2470,12 → 3687,10
drm_i915_private_t *dev_priv = dev->dev_private;
 
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < I915_MAX_NUM_FENCES; i++)
2485,13 → 3700,9
 
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
u32 tmp = I915_READ(MI_ARB_STATE);
if (!(tmp & MI_ARB_C3_LP_WRITE_ENABLE)) {
/* arb state is a masked write, so set bit + bit in mask */
tmp = MI_ARB_C3_LP_WRITE_ENABLE | (MI_ARB_C3_LP_WRITE_ENABLE << MI_ARB_MASK_SHIFT);
I915_WRITE(MI_ARB_STATE, tmp);
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}
}
 
dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
2501,9 → 3712,7
dev_priv->num_fence_regs = 8;
 
/* Initialize fence registers to zero */
for (i = 0; i < dev_priv->num_fence_regs; i++) {
i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
}
i915_gem_reset_fences(dev);
 
i915_gem_detect_bit_6_swizzle(dev);
 
/drivers/video/drm/i915/i915_gem_context.c
0,0 → 1,552
/*
* Copyright © 2011-2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Ben Widawsky <ben@bwidawsk.net>
*
*/
 
/*
* This file implements HW context support. On gen5+ a HW context consists of an
* opaque GPU object which is referenced at times of context saves and restores.
* With RC6 enabled, the context is also referenced as the GPU enters and exits
* RC6 (the GPU has its own internal power context, except on gen5). Though
* something like a context does exist for the media ring, the code only
* supports contexts for the render ring.
*
* In software, there is a distinction between contexts created by the user,
* and the default HW context. The default HW context is used by GPU clients
* that do not request setup of their own hardware context. The default
* context's state is never restored to help prevent programming errors. This
* would happen if a client ran and piggy-backed off another client's GPU state.
* The default context only exists to give the GPU some offset to load as the
* current context, so as to invoke a save of the context we actually care
* about. In fact, the
* code could likely be constructed, albeit in a more complicated fashion, to
* never use the default context, though that limits the driver's ability to
* swap out, and/or destroy other contexts.
*
* All other contexts are created as a request by the GPU client. These contexts
* store GPU state, and thus allow GPU clients to not re-emit state (and
* potentially query certain state) at any time. The kernel driver makes
* certain that the appropriate commands are inserted.
*
* The context life cycle is semi-complicated in that context BOs may live
* longer than the context itself because of the way the hardware, and object
* tracking works. Below is a very crude representation of the state machine
* describing the context life.
*                                          refcount  pincount  active
* S0: initial state                            0         0        0
* S1: context created                          1         0        0
* S2: context is currently running             2         1        X
* S3: GPU referenced, but not current          2         0        1
* S4: context is current, but destroyed        1         1        0
* S5: like S3, but destroyed                   1         0        1
*
* The most common (but not all) transitions:
* S0->S1: client creates a context
* S1->S2: client submits execbuf with context
* S2->S3: other clients submits execbuf with context
* S3->S1: context object was retired
* S3->S2: clients submits another execbuf
* S2->S4: context destroy called with current context
* S3->S5->S0: destroy path
* S4->S5->S0: destroy path on current context
*
* There are two confusing terms used above:
* The "current context" means the context which is currently running on the
* GPU. The GPU has loaded its state already and has stored away the gtt
* offset of the BO. The GPU is not actively referencing the data at this
* offset, but it will on the next context switch. The only way to avoid this
* is to do a GPU reset.
*
* An "active context" is one which was previously the "current context" and is
* on the active list waiting for the next context switch to occur. Until this
* happens, the object must remain at the same gtt offset. It is therefore
* possible to destroy a context, but it is still active.
*
*/
 
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
/* This is a HW constraint. The value below is the largest known requirement
* I've seen in a spec to date, and that was a workaround for a non-shipping
* part. It should be safe to decrease this, but it's more future proof as is.
*/
#define CONTEXT_ALIGN (64<<10)
 
#if 0
 
static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
static int do_switch(struct i915_hw_context *to);
 
static int get_context_size(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
u32 reg;
 
switch (INTEL_INFO(dev)->gen) {
case 6:
reg = I915_READ(CXT_SIZE);
ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
break;
case 7:
reg = I915_READ(GEN7_CXT_SIZE);
if (IS_HASWELL(dev))
ret = HSW_CXT_TOTAL_SIZE(reg) * 64;
else
ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
break;
default:
BUG();
}
 
return ret;
}
 
static void do_destroy(struct i915_hw_context *ctx)
{
struct drm_device *dev = ctx->obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (ctx->file_priv)
idr_remove(&ctx->file_priv->context_idr, ctx->id);
else
BUG_ON(ctx != dev_priv->ring[RCS].default_context);
 
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
}
 
static struct i915_hw_context *
create_hw_context(struct drm_device *dev,
struct drm_i915_file_private *file_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx;
int ret, id;
 
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
 
ctx->obj = i915_gem_alloc_object(dev, dev_priv->hw_context_size);
if (ctx->obj == NULL) {
kfree(ctx);
DRM_DEBUG_DRIVER("Context object allocation failed\n");
return ERR_PTR(-ENOMEM);
}
 
/* The ring associated with the context object is handled by the normal
* object tracking code. We give an initial ring value simply to pass an
* assertion in the context switch code.
*/
ctx->ring = &dev_priv->ring[RCS];
 
/* Default context will never have a file_priv */
if (file_priv == NULL)
return ctx;
 
ctx->file_priv = file_priv;
 
again:
if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
ret = -ENOMEM;
DRM_DEBUG_DRIVER("idr allocation failed\n");
goto err_out;
}
 
ret = idr_get_new_above(&file_priv->context_idr, ctx,
DEFAULT_CONTEXT_ID + 1, &id);
if (ret == 0)
ctx->id = id;
 
if (ret == -EAGAIN)
goto again;
else if (ret)
goto err_out;
 
return ctx;
 
err_out:
do_destroy(ctx);
return ERR_PTR(ret);
}
 
static inline bool is_default_context(struct i915_hw_context *ctx)
{
return (ctx == ctx->ring->default_context);
}
 
/**
* The default context needs to exist per ring that uses contexts. It stores the
* context state of the GPU for applications that don't utilize HW contexts, as
* well as an idle case.
*/
static int create_default_context(struct drm_i915_private *dev_priv)
{
struct i915_hw_context *ctx;
int ret;
 
BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
ctx = create_hw_context(dev_priv->dev, NULL);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
 
/* We may need to do things with the shrinker which require us to
* immediately switch back to the default context. This can cause a
* problem as pinning the default context also requires GTT space which
* may not be available. To avoid this we always pin the
* default context.
*/
dev_priv->ring[RCS].default_context = ctx;
ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret)
goto err_destroy;
 
ret = do_switch(ctx);
if (ret)
goto err_unpin;
 
DRM_DEBUG_DRIVER("Default HW context loaded\n");
return 0;
 
err_unpin:
i915_gem_object_unpin(ctx->obj);
err_destroy:
do_destroy(ctx);
return ret;
}
#endif
 
void i915_gem_context_init(struct drm_device *dev)
{
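/* HW contexts are stubbed out in this port for now: disable them
 * unconditionally and skip the upstream initialization below. */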
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ctx_size;
 
dev_priv->hw_contexts_disabled = true;
return;
 
#if 0
if (!HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_contexts_disabled = true;
return;
}
 
/* If called from reset, or thaw... we've been here already */
if (dev_priv->hw_contexts_disabled ||
dev_priv->ring[RCS].default_context)
return;
 
ctx_size = get_context_size(dev);
dev_priv->hw_context_size = get_context_size(dev);
dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096);
 
if (ctx_size <= 0 || ctx_size > (1<<20)) {
dev_priv->hw_contexts_disabled = true;
return;
}
 
if (create_default_context(dev_priv)) {
dev_priv->hw_contexts_disabled = true;
return;
}
 
DRM_DEBUG_DRIVER("HW context support initialized\n");
#endif
 
}
 
#if 0
void i915_gem_context_fini(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->hw_contexts_disabled)
return;
 
/* The only known way to stop the gpu from accessing the hw context is
* to reset it. Do this as the very last operation to avoid confusing
* other code, leading to spurious errors. */
intel_gpu_reset(dev);
 
i915_gem_object_unpin(dev_priv->ring[RCS].default_context->obj);
 
do_destroy(dev_priv->ring[RCS].default_context);
}
 
static int context_idr_cleanup(int id, void *p, void *data)
{
struct i915_hw_context *ctx = p;
 
BUG_ON(id == DEFAULT_CONTEXT_ID);
 
do_destroy(ctx);
 
return 0;
}
 
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
 
mutex_lock(&dev->struct_mutex);
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
mutex_unlock(&dev->struct_mutex);
}
 
static struct i915_hw_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
{
return (struct i915_hw_context *)idr_find(&file_priv->context_idr, id);
}
 
static inline int
mi_set_context(struct intel_ring_buffer *ring,
struct i915_hw_context *new_context,
u32 hw_flags)
{
int ret;
 
/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
* invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
* explicitly, so we rely on the value at ring init, stored in
* itlb_before_ctx_switch.
*/
if (IS_GEN6(ring->dev) && ring->itlb_before_ctx_switch) {
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, 0);
if (ret)
return ret;
}
 
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
 
if (IS_GEN7(ring->dev))
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
else
intel_ring_emit(ring, MI_NOOP);
 
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, new_context->obj->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
hw_flags);
/* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP */
intel_ring_emit(ring, MI_NOOP);
 
if (IS_GEN7(ring->dev))
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
else
intel_ring_emit(ring, MI_NOOP);
 
intel_ring_advance(ring);
 
return ret;
}
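/* For reference, the six dwords emitted above on gen7 are:
 *
 *   MI_ARB_ON_OFF | MI_ARB_DISABLE
 *   MI_NOOP
 *   MI_SET_CONTEXT
 *   new_context->obj->gtt_offset | MI_MM_SPACE_GTT |
 *       MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | hw_flags
 *   MI_NOOP                           (w/a: must follow MI_SET_CONTEXT)
 *   MI_ARB_ON_OFF | MI_ARB_ENABLE
 *
 * On gen6 the two MI_ARB_ON_OFF dwords are plain MI_NOOPs.
 */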
 
static int do_switch(struct i915_hw_context *to)
{
struct intel_ring_buffer *ring = to->ring;
struct drm_i915_gem_object *from_obj = ring->last_context_obj;
u32 hw_flags = 0;
int ret;
 
BUG_ON(from_obj != NULL && from_obj->pin_count == 0);
 
if (from_obj == to->obj)
return 0;
 
ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
 
/* Clear this page out of any CPU caches for coherent swap-in/out. Note
* that thanks to write = false in this call and us not setting any gpu
* write domains when putting a context object onto the active list
* (when switching away from it), this won't block.
* XXX: We need a real interface to do this instead of trickery. */
ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
if (ret) {
i915_gem_object_unpin(to->obj);
return ret;
}
 
if (!to->obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(to->obj, to->obj->cache_level);
 
if (!to->is_initialized || is_default_context(to))
hw_flags |= MI_RESTORE_INHIBIT;
else if (WARN_ON_ONCE(from_obj == to->obj)) /* not yet expected */
hw_flags |= MI_FORCE_RESTORE;
 
ret = mi_set_context(ring, to, hw_flags);
if (ret) {
i915_gem_object_unpin(to->obj);
return ret;
}
 
/* The backing object for the context is done after switching to the
* *next* context. Therefore we cannot retire the previous context until
* the next context has already started running. In fact, the below code
* is a bit suboptimal because the retiring can occur simply after the
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from_obj, ring, seqno);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
* correct in case the object gets swapped out. Ideally we'd be
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
from_obj->dirty = 1;
BUG_ON(from_obj->ring != ring);
i915_gem_object_unpin(from_obj);
 
drm_gem_object_unreference(&from_obj->base);
}
 
drm_gem_object_reference(&to->obj->base);
ring->last_context_obj = to->obj;
to->is_initialized = true;
 
return 0;
}
#endif
 
/**
* i915_switch_context() - perform a GPU context switch.
* @ring: ring for which we'll execute the context switch
* @file: file associated with the context, may be NULL
* @to_id: context id number
*
* The context life cycle is simple. The context refcount is incremented and
* decremented by 1 on create and destroy. If the context is in use by the GPU,
* it will have a refcount > 1. This allows us to destroy the context abstract
* object while letting the normal object tracking destroy the backing BO.
*/
int i915_switch_context(struct intel_ring_buffer *ring,
struct drm_file *file,
int to_id)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_hw_context *to;
 
if (dev_priv->hw_contexts_disabled)
return 0;
 
#if 0
if (ring != &dev_priv->ring[RCS])
return 0;
 
if (to_id == DEFAULT_CONTEXT_ID) {
to = ring->default_context;
} else {
if (file == NULL)
return -EINVAL;
 
to = i915_gem_context_get(file->driver_priv, to_id);
if (to == NULL)
return -ENOENT;
}
 
return do_switch(to);
#endif
 
return 0;
}
 
#if 0
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_context_create *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *ctx;
int ret;
 
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
 
if (dev_priv->hw_contexts_disabled)
return -ENODEV;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
ctx = create_hw_context(dev, file_priv);
mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
 
args->ctx_id = ctx->id;
DRM_DEBUG_DRIVER("HW context %d created\n", args->ctx_id);
 
return 0;
}
 
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_context_destroy *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_hw_context *ctx;
int ret;
 
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
ctx = i915_gem_context_get(file_priv, args->ctx_id);
if (!ctx) {
mutex_unlock(&dev->struct_mutex);
return -ENOENT;
}
 
do_destroy(ctx);
 
mutex_unlock(&dev->struct_mutex);
 
DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
return 0;
}
 
#endif
/drivers/video/drm/i915/i915_gem_gtt.c
22,9 → 22,8
*
*/
 
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
33,6 → 32,220
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
 
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
uint32_t *pt_vaddr;
uint32_t scratch_pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
 
scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
 
pt_vaddr = AllocKernelSpace(4096);
 
if(pt_vaddr != NULL)
{
while (num_entries)
{
last_pte = first_pte + num_entries;
if (last_pte > I915_PPGTT_PT_ENTRIES)
last_pte = I915_PPGTT_PT_ENTRIES;
 
MapPage(pt_vaddr,ppgtt->pt_pages[act_pd], 3);
 
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
 
num_entries -= last_pte - first_pte;
first_pte = 0;
act_pd++;
}
FreeKernelSpace(pt_vaddr);
};
}
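 
/* Porting note: where the upstream driver kmap_atomic()s each page-table
* page, this port reserves a single 4 KiB kernel-space window with
* AllocKernelSpace() and repoints it at successive page-table pages via
* MapPage() (the flag value 3 presumably meaning present | writable in
* x86 PTE terms), writes the scratch PTEs through that window, and frees
* it once every affected page directory has been visited. */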
 
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt;
unsigned first_pd_entry_in_global_pt;
int i;
int ret = -ENOMEM;
 
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
 
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return ret;
 
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->pt_pages = kzalloc(sizeof(dma_addr_t)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
goto err_ppgtt;
 
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = AllocPage();
if (!ppgtt->pt_pages[i])
goto err_pt_alloc;
}
 
/*
if (dev_priv->mm.gtt->needs_dmar) {
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
 
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
 
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
0, 4096,
PCI_DMA_BIDIRECTIONAL);
 
if (pci_dma_mapping_error(dev->pdev,
pt_addr)) {
ret = -EIO;
goto err_pd_pin;
 
}
ppgtt->pt_dma_addr[i] = pt_addr;
}
}
*/
ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
 
i915_ppgtt_clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
 
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
 
dev_priv->mm.aliasing_ppgtt = ppgtt;
 
return 0;
 
err_pd_pin:
// if (ppgtt->pt_dma_addr) {
// for (i--; i >= 0; i--)
// pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
// 4096, PCI_DMA_BIDIRECTIONAL);
// }
err_pt_alloc:
// kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
if (ppgtt->pt_pages[i])
FreePage(ppgtt->pt_pages[i]);
}
kfree(ppgtt->pt_pages);
err_ppgtt:
kfree(ppgtt);
 
return ret;
}
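 
/* Rough arithmetic, assuming the usual gen6 values of
* I915_PPGTT_PD_ENTRIES = 512 and I915_PPGTT_PT_ENTRIES = 1024: the
* aliasing PPGTT then addresses 512 * 1024 pages * 4 KiB = 2 GiB, while
* only 512 PDEs are stolen from the tail of the global GTT. */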
 
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i;
 
if (!ppgtt)
return;
 
// if (ppgtt->pt_dma_addr) {
// for (i = 0; i < ppgtt->num_pd_entries; i++)
// pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
// 4096, PCI_DMA_BIDIRECTIONAL);
// }
 
// kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
FreePage(ppgtt->pt_pages[i]);
kfree(ppgtt->pt_pages);
kfree(ppgtt);
}
 
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
const struct pagelist *pages,
unsigned first_entry,
uint32_t pte_flags)
{
uint32_t *pt_vaddr, pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned i, j;
dma_addr_t page_addr;
 
i = 0;
 
pt_vaddr = AllocKernelSpace(4096);
 
if( pt_vaddr != NULL)
{
while (i < pages->nents)
{
MapPage(pt_vaddr, ppgtt->pt_pages[act_pd], 3);
 
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++, i++) {
page_addr = pages->page[i];
pte = GEN6_PTE_ADDR_ENCODE(page_addr);
pt_vaddr[j] = pte | pte_flags;
}
 
first_pte = 0;
act_pd++;
}
FreeKernelSpace(pt_vaddr);
};
}
 
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
uint32_t pte_flags = GEN6_PTE_VALID;
 
switch (cache_level) {
case I915_CACHE_LLC_MLC:
pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
break;
case I915_CACHE_LLC:
pte_flags |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
if (IS_HASWELL(obj->base.dev))
pte_flags |= HSW_PTE_UNCACHED;
else
pte_flags |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
}
 
i915_ppgtt_insert_sg_entries(ppgtt,
&obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
pte_flags);
}
 
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
i915_ppgtt_clear_range(ppgtt,
obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
}
 
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
enum i915_cache_level cache_level)
85,9 → 298,9
intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
i915_gem_clflush_object(obj);
i915_gem_gtt_rebind_object(obj, obj->cache_level);
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
 
intel_gtt_chipset_flush();
94,57 → 307,41
}
#endif
 
int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
int ret;
if (obj->has_dma_mapping)
return 0;
 
// if (dev_priv->mm.gtt->needs_dmar) {
// ret = intel_gtt_map_memory(obj->pages,
// obj->base.size >> PAGE_SHIFT,
// &obj->sg_list,
// &obj->num_sg);
// if (ret != 0)
// return ret;
// if (!dma_map_sg(&obj->base.dev->pdev->dev,
// obj->pages->sgl, obj->pages->nents,
// PCI_DMA_BIDIRECTIONAL))
// return -ENOSPC;
 
// intel_gtt_insert_sg_entries(obj->sg_list,
// obj->num_sg,
// obj->gtt_space->start >> PAGE_SHIFT,
// agp_type);
// } else
intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
agp_type);
 
return 0;
}
 
void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
 
// if (dev_priv->mm.gtt->needs_dmar) {
// BUG_ON(!obj->sg_list);
 
// intel_gtt_insert_sg_entries(obj->sg_list,
// obj->num_sg,
// obj->gtt_space->start >> PAGE_SHIFT,
// agp_type);
// } else
intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
intel_gtt_insert_sg_entries(&obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
agp_type);
obj->has_global_gtt_mapping = 1;
}
 
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
 
obj->has_global_gtt_mapping = 0;
}
 
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible;
151,13 → 348,49
 
interruptible = do_idling(dev_priv);
 
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
// if (!obj->has_dma_mapping)
// dma_unmap_sg(&dev->pdev->dev,
// obj->pages->sgl, obj->pages->nents,
// PCI_DMA_BIDIRECTIONAL);
 
if (obj->sg_list) {
// intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
undo_idling(dev_priv, interruptible);
}
 
undo_idling(dev_priv, interruptible);
static void i915_gtt_color_adjust(struct drm_mm_node *node,
unsigned long color,
unsigned long *start,
unsigned long *end)
{
if (node->color != color)
*start += 4096;
 
if (!list_empty(&node->node_list)) {
node = list_entry(node->node_list.next,
struct drm_mm_node,
node_list);
if (node->allocated && node->color != color)
*end -= 4096;
}
}
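 
/* The "color" here is the object's cache level: on non-LLC parts,
* objects with different cache attributes must not share a GTT page, so
* the adjuster shaves a 4 KiB guard page off either end of a candidate
* hole whose neighbour has a different color. E.g. a hole starting at
* 0x10000 next to a differently-colored node is offered as starting at
* 0x11000 instead. */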
 
void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end)
{
drm_i915_private_t *dev_priv = dev->dev_private;
 
/* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
 
dev_priv->mm.gtt_start = start;
dev_priv->mm.gtt_mappable_end = mappable_end;
dev_priv->mm.gtt_end = end;
dev_priv->mm.gtt_total = end - start;
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 
/* ... but ensure that we clear the entire range. */
intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}
/drivers/video/drm/i915/i915_gem_stolen.c
0,0 → 1,201
/*
* Copyright © 2008-2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
 
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
/*
* The BIOS typically reserves some of the system's memory for the exclusive
* use of the integrated graphics. This memory is no longer available for
* use by the OS and so the user finds that his system has less memory
* available than he put in. We refer to this memory as stolen.
*
* The BIOS will allocate its framebuffer from the stolen memory. Our
* goal is to try to reuse that object for our own fbcon, which must always
* be available for panics. Anything else we can reuse the stolen memory
* for is a boon.
*/
 
#define PTE_ADDRESS_MASK 0xfffff000
#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED (3 << 1)
#define PTE_MAPPING_TYPE_MASK (3 << 1)
#define PTE_VALID (1 << 0)
 
/**
* i915_stolen_to_phys - take an offset into stolen memory and turn it into
* a physical one
* @dev: drm device
* @offset: address to translate
*
* Some chip functions require allocations from stolen space and need the
* physical address of the memory in question.
*/
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev_priv->bridge_dev;
u32 base;
 
#if 0
/* On the machines I have tested the Graphics Base of Stolen Memory
* is unreliable, so compute the base by subtracting the stolen memory
* from the Top of Low Usable DRAM which is where the BIOS places
* the graphics stolen memory.
*/
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
/* top 32bits are reserved = 0 */
pci_read_config_dword(pdev, 0xA4, &base);
} else {
/* XXX presume 8xx is the same as i915 */
pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
}
#else
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
u16 val;
pci_read_config_word(pdev, 0xb0, &val);
base = val >> 4 << 20;
} else {
u8 val;
pci_read_config_byte(pdev, 0x9c, &val);
base = val >> 3 << 27;
}
base -= dev_priv->mm.gtt->stolen_size;
#endif
 
return base + offset;
}
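 
/* Worked example with hypothetical register values: on a gen4+ part, a
* config word of 0x0800 at 0xb0 gives base = (0x0800 >> 4) << 20 =
* 0x08000000 (128 MiB, the top of low usable DRAM); with a 32 MiB
* stolen_size the stolen region then starts at 0x06000000, and the
* caller's offset is added on top. */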
 
static void i915_warn_stolen(struct drm_device *dev)
{
DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}
 
static void i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
unsigned long cfb_base;
unsigned long ll_base = 0;
 
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);
 
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb)
goto err;
 
cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
if (!cfb_base)
goto err_fb;
 
if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
4096, 4096, 0);
if (compressed_llb)
compressed_llb = drm_mm_get_block(compressed_llb,
4096, 4096);
if (!compressed_llb)
goto err_fb;
 
ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
if (!ll_base)
goto err_llb;
}
 
dev_priv->cfb_size = size;
 
dev_priv->compressed_fb = compressed_fb;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
dev_priv->compressed_llb = compressed_llb;
}
 
DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
cfb_base, ll_base, size >> 20);
return;
 
err_llb:
drm_mm_put_block(compressed_llb);
err_fb:
drm_mm_put_block(compressed_fb);
err:
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
i915_warn_stolen(dev);
}
 
static void i915_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
drm_mm_put_block(dev_priv->compressed_fb);
if (dev_priv->compressed_llb)
drm_mm_put_block(dev_priv->compressed_llb);
}
 
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
}
 
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
 
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
 
/* Try to set up FBC with a reasonable compressed buffer size */
if (I915_HAS_FBC(dev) && i915_powersave) {
int cfb_size;
 
/* Leave 1M for line length buffer & misc. */
 
/* Try to get a 32M buffer... */
if (prealloc_size > (36*1024*1024))
cfb_size = 32*1024*1024;
else /* fall back to 7/8 of the stolen space */
cfb_size = prealloc_size * 7 / 8;
i915_setup_compression(dev, cfb_size);
}
 
return 0;
}
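 
/* Sizing example: with 64 MiB of stolen memory, prealloc_size exceeds
* 36 MiB and a 32 MiB compressed buffer is requested; with only 32 MiB
* stolen we fall back to 7/8 of it, i.e. 28 MiB, leaving the remainder
* for the line length buffer and other stolen-space users. */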
/drivers/video/drm/i915/i915_gem_tiling.c
25,11 → 25,10
*
*/
 
#include "linux/string.h"
#include "linux/bitops.h"
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include <linux/string.h>
#include <linux/bitops.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
/** @file i915_gem_tiling.c
110,9 → 109,27
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
if (INTEL_INFO(dev)->gen >= 6) {
if (IS_VALLEYVIEW(dev)) {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (INTEL_INFO(dev)->gen >= 6) {
uint32_t dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
/* Enable swizzling when the channels are populated with
* identically sized dimms. We don't need to check the 3rd
* channel because no cpu with gpu attached ships in that
* configuration. Also, swizzling only makes sense for 2
* channels anyway. */
if (dimm_c0 == dimm_c1) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
} else if (IS_GEN5(dev)) {
/* On Ironlake, whatever the DRAM config, the GPU always does the
* same swizzling setup.
125,10 → 142,10
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_MOBILE(dev)) {
} else if (IS_MOBILE(dev) || (IS_GEN3(dev) && !IS_G33(dev))) {
uint32_t dcc;
 
/* On mobile 9xx chipsets, channel interleave by the CPU is
/* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
358,9 → 375,15
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
* need to ensure that any fence register is cleared.
* need to ensure that any fence register is updated before
* the next fenced (either through the GTT or by the BLT unit
* on older GPUs) access.
*
* After updating the tiling parameters, we then flag whether
* we need to update an associated fence register. Note this
* has to also include the unfenced register the GPU uses
* whilst executing a fenced command for an untiled object.
*/
i915_gem_release_mmap(obj);
 
obj->map_and_fenceable =
obj->gtt_space == NULL ||
378,9 → 401,15
}
 
if (ret == 0) {
obj->tiling_changed = true;
obj->fence_dirty =
obj->fenced_gpu_access ||
obj->fence_reg != I915_FENCE_REG_NONE;
 
obj->tiling_mode = args->tiling_mode;
obj->stride = args->stride;
 
/* Force the fence to be reacquired for GTT access */
i915_gem_release_mmap(obj);
}
}
/* we have to maintain this existing ABI... */
462,6 → 491,7
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct scatterlist *sg;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
 
468,12 → 498,13
if (obj->bit_17 == NULL)
return;
 
for (i = 0; i < page_count; i++) {
char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
for_each_sg(obj->pages->sgl, sg, page_count, i) {
struct page *page = sg_page(sg);
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(obj->pages[i]);
set_page_dirty(obj->pages[i]);
i915_gem_swizzle_page(page);
set_page_dirty(page);
}
}
}
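 
/* Background: on these machines, bit 6 swizzling of an address depends
* on bit 17 of the page's physical address. If the backing pages are
* relocated (e.g. across swap-out/swap-in), bit 17 may flip, so the
* saved per-page bit_17 bitmap is compared against the new physical
* placement and any mismatching page has its contents re-swizzled in
* place. */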
481,6 → 512,7
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
struct scatterlist *sg;
int page_count = obj->base.size >> PAGE_SHIFT;
int i;
 
494,8 → 526,9
}
}
 
for (i = 0; i < page_count; i++) {
if (page_to_phys(obj->pages[i]) & (1 << 17))
for_each_sg(obj->pages->sgl, sg, page_count, i) {
struct page *page = sg_page(sg);
if (page_to_phys(page) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
/drivers/video/drm/i915/i915_irq.c
26,15 → 26,22
*
*/
 
#define pr_fmt(fmt) ": " fmt
 
#include <linux/irqreturn.h>
//#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
 
 
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 
#define DRM_IRQ_ARGS void *arg
 
#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
 
87,19 → 94,137
POSTING_READ(DEIMR);
}
}
 
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != mask) {
u32 reg = PIPESTAT(pipe);
 
dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
POSTING_READ(reg);
}
}
 
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
if ((dev_priv->pipestat[pipe] & mask) != 0) {
u32 reg = PIPESTAT(pipe);
 
dev_priv->pipestat[pipe] &= ~mask;
I915_WRITE(reg, dev_priv->pipestat[pipe]);
POSTING_READ(reg);
}
}
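 
/* PIPESTAT layout note: each enable bit in the high 16 bits has its
* matching status bit 16 positions lower, which is why the enable path
* writes (pipestat | (mask >> 16)) -- arming the interrupt and acking
* any stale status in a single write. */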
 
#if 0
/**
* intel_enable_asle - enable ASLE interrupt for OpRegion
*/
void intel_enable_asle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
 
/* FIXME: opregion/asle for VLV */
if (IS_VALLEYVIEW(dev))
return;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
else {
i915_enable_pipestat(dev_priv, 1,
PIPE_LEGACY_BLC_EVENT_ENABLE);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, 0,
PIPE_LEGACY_BLC_EVENT_ENABLE);
}
 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
#endif
 
/**
* i915_pipe_enabled - check if a pipe is enabled
* @dev: DRM device
* @pipe: pipe to check
*
* Reading certain registers when the pipe is disabled can hang the chip.
* Use this routine to make sure the PLL is running and the pipe is active
* before reading such registers if unsure.
*/
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}
 
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long high_frame;
unsigned long low_frame;
u32 high1, high2, low;
 
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
"pipe %c\n", pipe_name(pipe));
return 0;
}
 
high_frame = PIPEFRAME(pipe);
low_frame = PIPEFRAMEPIXEL(pipe);
 
/*
* High & low register fields aren't synchronized, so make sure
* we get a low value that's stable across two reads of the high
* register.
*/
do {
high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
} while (high1 != high2);
 
high1 >>= PIPE_FRAME_HIGH_SHIFT;
low >>= PIPE_FRAME_LOW_SHIFT;
return (high1 << 8) | low;
}
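 
/* Race example: if the low 8 bits wrap from 0xff to 0x00 between the
* two reads of the high register, the high field increments, high1 !=
* high2, and the loop retries; the returned (high1 << 8) | low is
* therefore always a self-consistent frame count. */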
 
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int reg = PIPE_FRMCOUNT_GM45(pipe);
 
if (!i915_pipe_enabled(dev, pipe)) {
DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
"pipe %c\n", pipe_name(pipe));
return 0;
}
 
return I915_READ(reg);
}
 
 
static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 seqno;
 
if (ring->obj == NULL)
return;
 
seqno = ring->get_seqno(ring);
trace_i915_gem_request_complete(ring, seqno);
trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
 
ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
// if (i915_enable_hangcheck) {
// dev_priv->hangcheck_count = 0;
109,22 → 234,410
// }
}
 
#if 0
static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps.work);
u32 pm_iir, pm_imr;
u8 new_delay;
 
spin_lock_irq(&dev_priv->rps.lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
pm_imr = I915_READ(GEN6_PMIMR);
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps.lock);
 
static int ironlake_irq_handler(struct drm_device *dev)
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
 
mutex_lock(&dev_priv->dev->struct_mutex);
 
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
new_delay = dev_priv->rps.cur_delay + 1;
else
new_delay = dev_priv->rps.cur_delay - 1;
 
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
if (!(new_delay > dev_priv->rps.max_delay ||
new_delay < dev_priv->rps.min_delay)) {
gen6_set_rps(dev_priv->dev, new_delay);
}
 
mutex_unlock(&dev_priv->dev->struct_mutex);
}
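 
/* Example (illustrative delay values): with rps.min_delay = 3,
* rps.max_delay = 10 and rps.cur_delay = 10, an UP_THRESHOLD event
* computes new_delay = 11, which fails the range check and is simply
* dropped, so the frequency never steps outside [min_delay, max_delay]. */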
 
 
/**
* ivybridge_parity_work - Workqueue called when a parity error interrupt
* occurred.
* @work: workqueue struct
*
* Doesn't actually do anything except notify userspace. As a consequence of
* this event, userspace should try to remap the bad rows, since
* statistically the same row is more likely to go bad again.
*/
static void ivybridge_parity_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
parity_error_work);
u32 error_status, row, bank, subbank;
char *parity_event[5];
uint32_t misccpctl;
unsigned long flags;
 
/* We must turn off DOP level clock gating to access the L3 registers.
* In order to prevent a get/put style interface, acquire struct mutex
* any time we access those registers.
*/
mutex_lock(&dev_priv->dev->struct_mutex);
 
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
POSTING_READ(GEN7_MISCCPCTL);
 
error_status = I915_READ(GEN7_L3CDERRST1);
row = GEN7_PARITY_ERROR_ROW(error_status);
bank = GEN7_PARITY_ERROR_BANK(error_status);
subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
 
I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
GEN7_L3CDERRST1_ENABLE);
POSTING_READ(GEN7_L3CDERRST1);
 
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
mutex_unlock(&dev_priv->dev->struct_mutex);
 
parity_event[0] = "L3_PARITY_ERROR=1";
parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
parity_event[4] = NULL;
 
kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
KOBJ_CHANGE, parity_event);
 
DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
row, bank, subbank);
 
kfree(parity_event[3]);
kfree(parity_event[2]);
kfree(parity_event[1]);
}
 
static void ivybridge_handle_parity_error(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long flags;
 
if (!HAS_L3_GPU_CACHE(dev))
return;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
queue_work(dev_priv->wq, &dev_priv->parity_error_work);
}
 
#endif
 
static void snb_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 gt_iir)
{
 
if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
notify_ring(dev, &dev_priv->ring[RCS]);
if (gt_iir & GEN6_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[BCS]);
 
if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
GT_GEN6_BSD_CS_ERROR_INTERRUPT |
GT_RENDER_CS_ERROR_INTERRUPT)) {
DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
i915_handle_error(dev, false);
}
 
// if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
// ivybridge_handle_parity_error(dev);
}
 
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
u32 pm_iir)
{
unsigned long flags;
 
/*
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
* displays a case where we've unsafely cleared
* dev_priv->rps.pm_iir. Although missing an interrupt of the same
* type is not a problem, it indicates a problem in the logic.
*
* The mask bit in IMR is cleared by dev_priv->rps.work.
*/
 
spin_lock_irqsave(&dev_priv->rps.lock, flags);
dev_priv->rps.pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
 
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
 
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 iir, gt_iir, pm_iir;
irqreturn_t ret = IRQ_NONE;
unsigned long irqflags;
int pipe;
u32 pipe_stats[I915_MAX_PIPES];
bool blc_event;
 
atomic_inc(&dev_priv->irq_received);
 
while (true) {
iir = I915_READ(VLV_IIR);
gt_iir = I915_READ(GTIIR);
pm_iir = I915_READ(GEN6_PMIIR);
 
if (gt_iir == 0 && pm_iir == 0 && iir == 0)
goto out;
 
ret = IRQ_HANDLED;
 
snb_gt_irq_handler(dev, dev_priv, gt_iir);
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg);
 
/*
* Clear the PIPE*STAT regs before the IIR
*/
if (pipe_stats[pipe] & 0x8000ffff) {
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
#if 0
for_each_pipe(pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
drm_handle_vblank(dev, pipe);
 
if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
intel_prepare_page_flip(dev, pipe);
intel_finish_page_flip(dev, pipe);
}
}
#endif
 
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
// if (hotplug_status & dev_priv->hotplug_supported_mask)
// queue_work(dev_priv->wq,
// &dev_priv->hotplug_work);
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
 
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
 
// if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
// gen6_queue_rps_work(dev_priv, pm_iir);
 
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
I915_WRITE(VLV_IIR, iir);
}
 
out:
return ret;
}
 
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
 
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
 
if (pch_iir & SDE_GMBUS)
DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
 
if (pch_iir & SDE_AUDIO_HDCP_MASK)
DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
 
if (pch_iir & SDE_AUDIO_TRANS_MASK)
DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
 
if (pch_iir & SDE_POISON)
DRM_ERROR("PCH poison interrupt\n");
 
if (pch_iir & SDE_FDI_MASK)
for_each_pipe(pipe)
DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
I915_READ(FDI_RX_IIR(pipe)));
 
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
 
if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
 
if (pch_iir & SDE_TRANSB_FIFO_UNDER)
DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
 
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
 
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
 
if (pch_iir & SDE_AUX_MASK_CPT)
DRM_DEBUG_DRIVER("AUX channel interrupt\n");
 
if (pch_iir & SDE_GMBUS_CPT)
DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
 
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
 
if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
 
if (pch_iir & SDE_FDI_MASK_CPT)
for_each_pipe(pipe)
DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
pipe_name(pipe),
I915_READ(FDI_RX_IIR(pipe)));
}
 
static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 de_iir, gt_iir, de_ier, pm_iir;
irqreturn_t ret = IRQ_NONE;
int i;
 
atomic_inc(&dev_priv->irq_received);
 
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
 
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
snb_gt_irq_handler(dev, dev_priv, gt_iir);
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
}
 
de_iir = I915_READ(DEIIR);
if (de_iir) {
#if 0
if (de_iir & DE_GSE_IVB)
intel_opregion_gse_intr(dev);
 
for (i = 0; i < 3; i++) {
if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
drm_handle_vblank(dev, i);
if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
intel_prepare_page_flip(dev, i);
intel_finish_page_flip_plane(dev, i);
}
}
#endif
/* check event from PCH */
if (de_iir & DE_PCH_EVENT_IVB) {
u32 pch_iir = I915_READ(SDEIIR);
 
// if (pch_iir & SDE_HOTPLUG_MASK_CPT)
// queue_work(dev_priv->wq, &dev_priv->hotplug_work);
cpt_irq_handler(dev, pch_iir);
 
/* clear PCH hotplug event before clearing the CPU irq */
I915_WRITE(SDEIIR, pch_iir);
}
 
I915_WRITE(DEIIR, de_iir);
ret = IRQ_HANDLED;
}
 
pm_iir = I915_READ(GEN6_PMIIR);
if (pm_iir) {
// if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
// gen6_queue_rps_work(dev_priv, pm_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
ret = IRQ_HANDLED;
}
 
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
 
return ret;
}
 
static void ilk_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
notify_ring(dev, &dev_priv->ring[RCS]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
}
 
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 hotplug_mask;
struct drm_i915_master_private *master_priv;
u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
 
atomic_inc(&dev_priv->irq_received);
 
if (IS_GEN6(dev))
bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
 
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
146,65 → 659,47
 
ret = IRQ_HANDLED;
 
if (IS_GEN5(dev))
ilk_gt_irq_handler(dev, dev_priv, gt_iir);
else
snb_gt_irq_handler(dev, dev_priv, gt_iir);
#if 0
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
 
if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
notify_ring(dev, &dev_priv->ring[RCS]);
if (gt_iir & bsd_usr_interrupt)
notify_ring(dev, &dev_priv->ring[VCS]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[BCS]);
if (de_iir & DE_PIPEA_VBLANK)
drm_handle_vblank(dev, 0);
 
// if (de_iir & DE_GSE)
// intel_opregion_gse_intr(dev);
if (de_iir & DE_PIPEB_VBLANK)
drm_handle_vblank(dev, 1);
 
// if (de_iir & DE_PLANEA_FLIP_DONE) {
// intel_prepare_page_flip(dev, 0);
// intel_finish_page_flip_plane(dev, 0);
// }
if (de_iir & DE_PLANEA_FLIP_DONE) {
intel_prepare_page_flip(dev, 0);
intel_finish_page_flip_plane(dev, 0);
}
 
// if (de_iir & DE_PLANEB_FLIP_DONE) {
// intel_prepare_page_flip(dev, 1);
// intel_finish_page_flip_plane(dev, 1);
// }
if (de_iir & DE_PLANEB_FLIP_DONE) {
intel_prepare_page_flip(dev, 1);
intel_finish_page_flip_plane(dev, 1);
}
#endif
 
// if (de_iir & DE_PIPEA_VBLANK)
// drm_handle_vblank(dev, 0);
 
// if (de_iir & DE_PIPEB_VBLANK)
// drm_handle_vblank(dev, 1);
 
/* check event from PCH */
// if (de_iir & DE_PCH_EVENT) {
if (de_iir & DE_PCH_EVENT) {
// if (pch_iir & hotplug_mask)
// queue_work(dev_priv->wq, &dev_priv->hotplug_work);
// pch_irq_handler(dev);
// }
 
// if (de_iir & DE_PCU_EVENT) {
// I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
// i915_handle_rps_change(dev);
// }
 
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
/*
* IIR bits should never already be set because IMR should
* prevent an interrupt from being shown in IIR. The warning
* displays a case where we've unsafely cleared
* dev_priv->pm_iir. Although missing an interrupt of the same
* type is not a problem, it indicates a problem in the logic.
*
* The mask bit in IMR is cleared by rps_work.
*/
unsigned long flags;
spin_lock_irqsave(&dev_priv->rps_lock, flags);
WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
dev_priv->pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
// queue_work(dev_priv->wq, &dev_priv->rps_work);
if (HAS_PCH_CPT(dev))
cpt_irq_handler(dev, pch_iir);
else
ibx_irq_handler(dev, pch_iir);
}
#if 0
if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
ironlake_handle_rps_change(dev);
 
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
#endif
/* should clear PCH hotplug event before clearing the CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
221,11 → 716,790
 
 
 
/* NB: please notice the memset */
static void i915_get_extra_instdone(struct drm_device *dev,
uint32_t *instdone)
{
struct drm_i915_private *dev_priv = dev->dev_private;
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
 
switch(INTEL_INFO(dev)->gen) {
case 2:
case 3:
instdone[0] = I915_READ(INSTDONE);
break;
case 4:
case 5:
case 6:
instdone[0] = I915_READ(INSTDONE_I965);
instdone[1] = I915_READ(INSTDONE1);
break;
default:
WARN(1, "Unsupported platform\n");
case 7:
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
break;
}
}
 
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *src)
{
struct drm_i915_error_object *dst;
int i, count;
u32 reloc_offset;
 
if (src == NULL || src->pages == NULL)
return NULL;
 
count = src->base.size / PAGE_SIZE;
 
dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
 
reloc_offset = src->gtt_offset;
for (i = 0; i < count; i++) {
unsigned long flags;
void *d;
 
d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
if (d == NULL)
goto unwind;
 
local_irq_save(flags);
if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
src->has_global_gtt_mapping) {
void __iomem *s;
 
/* Simply ignore tiling or any overlapping fence.
* It's part of the error state, and this hopefully
* captures what the GPU read.
*/
 
s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
} else {
struct page *page;
void *s;
 
page = i915_gem_object_get_page(src, i);
 
drm_clflush_pages(&page, 1);
 
s = kmap_atomic(page);
memcpy(d, s, PAGE_SIZE);
kunmap_atomic(s);
 
drm_clflush_pages(&page, 1);
}
local_irq_restore(flags);
 
dst->pages[i] = d;
 
reloc_offset += PAGE_SIZE;
}
dst->page_count = count;
dst->gtt_offset = src->gtt_offset;
 
return dst;
 
unwind:
while (i--)
kfree(dst->pages[i]);
kfree(dst);
return NULL;
}
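 
/* Two capture paths are used above: pages resident in the mappable GTT
* aperture are copied through an atomic write-combining io-mapping
* (reading what the GPU sees), while all other pages are copied via the
* CPU with clflush before and after, so the snapshot reflects memory
* contents rather than stale cache lines. */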
 
static void
i915_error_object_free(struct drm_i915_error_object *obj)
{
int page;
 
if (obj == NULL)
return;
 
for (page = 0; page < obj->page_count; page++)
kfree(obj->pages[page]);
 
kfree(obj);
}
 
void
i915_error_state_free(struct kref *error_ref)
{
struct drm_i915_error_state *error = container_of(error_ref,
typeof(*error), ref);
int i;
 
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
i915_error_object_free(error->ring[i].batchbuffer);
i915_error_object_free(error->ring[i].ringbuffer);
kfree(error->ring[i].requests);
}
 
kfree(error->active_bo);
kfree(error->overlay);
kfree(error);
}
static void capture_bo(struct drm_i915_error_buffer *err,
struct drm_i915_gem_object *obj)
{
err->size = obj->base.size;
err->name = obj->base.name;
err->rseqno = obj->last_read_seqno;
err->wseqno = obj->last_write_seqno;
err->gtt_offset = obj->gtt_offset;
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
err->fence_reg = obj->fence_reg;
err->pinned = 0;
if (obj->pin_count > 0)
err->pinned = 1;
if (obj->user_pin_count > 0)
err->pinned = -1;
err->tiling = obj->tiling_mode;
err->dirty = obj->dirty;
err->purgeable = obj->madv != I915_MADV_WILLNEED;
err->ring = obj->ring ? obj->ring->id : -1;
err->cache_level = obj->cache_level;
}
 
static u32 capture_active_bo(struct drm_i915_error_buffer *err,
int count, struct list_head *head)
{
struct drm_i915_gem_object *obj;
int i = 0;
 
list_for_each_entry(obj, head, mm_list) {
capture_bo(err++, obj);
if (++i == count)
break;
}
 
return i;
}
 
static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
int count, struct list_head *head)
{
struct drm_i915_gem_object *obj;
int i = 0;
 
list_for_each_entry(obj, head, gtt_list) {
if (obj->pin_count == 0)
continue;
 
capture_bo(err++, obj);
if (++i == count)
break;
}
 
return i;
}
 
static void i915_gem_record_fences(struct drm_device *dev,
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
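/* deliberate fall-through: gen3 also records the common first
* 8 fence registers below */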
case 2:
for (i = 0; i < 8; i++)
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
 
}
}
 
static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
u32 seqno;
 
if (!ring->get_seqno)
return NULL;
 
seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->ring != ring)
continue;
 
if (i915_seqno_passed(seqno, obj->last_read_seqno))
continue;
 
if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
continue;
 
/* We need to copy these to an anonymous buffer as the simplest
* method to avoid being overwritten by userspace.
*/
return i915_error_object_create(dev_priv, obj);
}
 
return NULL;
}
 
static void i915_record_ring_state(struct drm_device *dev,
struct drm_i915_error_state *error,
struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (INTEL_INFO(dev)->gen >= 6) {
error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
error->semaphore_mboxes[ring->id][0]
= I915_READ(RING_SYNC_0(ring->mmio_base));
error->semaphore_mboxes[ring->id][1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
}
 
if (INTEL_INFO(dev)->gen >= 4) {
error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
if (ring->id == RCS)
error->bbaddr = I915_READ64(BB_ADDR);
} else {
error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
error->ipeir[ring->id] = I915_READ(IPEIR);
error->ipehr[ring->id] = I915_READ(IPEHR);
error->instdone[ring->id] = I915_READ(INSTDONE);
}
 
error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
error->seqno[ring->id] = ring->get_seqno(ring, false);
error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring);
 
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
}
 
static void i915_gem_record_rings(struct drm_device *dev,
struct drm_i915_error_state *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
struct drm_i915_gem_request *request;
int i, count;
 
for_each_ring(ring, dev_priv, i) {
i915_record_ring_state(dev, error, ring);
 
error->ring[i].batchbuffer =
i915_error_first_batchbuffer(dev_priv, ring);
 
error->ring[i].ringbuffer =
i915_error_object_create(dev_priv, ring->obj);
 
count = 0;
list_for_each_entry(request, &ring->request_list, list)
count++;
 
error->ring[i].num_requests = count;
error->ring[i].requests =
kmalloc(count*sizeof(struct drm_i915_error_request),
GFP_ATOMIC);
if (error->ring[i].requests == NULL) {
error->ring[i].num_requests = 0;
continue;
}
 
count = 0;
list_for_each_entry(request, &ring->request_list, list) {
struct drm_i915_error_request *erq;
 
erq = &error->ring[i].requests[count++];
erq->seqno = request->seqno;
erq->jiffies = request->emitted_jiffies;
erq->tail = request->tail;
}
}
}
 
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
*
* Should be called when an error is detected (either a hang or an error
* interrupt) to capture error state from the time of the error. Fills
* out a structure which becomes available in debugfs for user level tools
* to pick up.
*/
static void i915_capture_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_i915_error_state *error;
unsigned long flags;
int i, pipe;
 
spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
if (error)
return;
 
/* Account for pipe specific data like PIPE*STAT */
error = kzalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
}
 
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
dev->primary->index);
 
kref_init(&error->ref);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->ccid = I915_READ(CCID);
 
if (HAS_PCH_SPLIT(dev))
error->ier = I915_READ(DEIER) | I915_READ(GTIER);
else if (IS_VALLEYVIEW(dev))
error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
else if (IS_GEN2(dev))
error->ier = I915_READ16(IER);
else
error->ier = I915_READ(IER);
 
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 
if (INTEL_INFO(dev)->gen >= 6) {
error->error = I915_READ(ERROR_GEN6);
error->done_reg = I915_READ(DONE_REG);
}
 
if (INTEL_INFO(dev)->gen == 7)
error->err_int = I915_READ(GEN7_ERR_INT);
 
i915_get_extra_instdone(dev, error->extra_instdone);
 
i915_gem_record_fences(dev, error);
i915_gem_record_rings(dev, error);
 
/* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
error->pinned_bo = NULL;
 
i = 0;
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
i++;
error->active_bo_count = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
if (obj->pin_count)
i++;
error->pinned_bo_count = i - error->active_bo_count;
 
error->active_bo = NULL;
error->pinned_bo = NULL;
if (i) {
error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
GFP_ATOMIC);
if (error->active_bo)
error->pinned_bo =
error->active_bo + error->active_bo_count;
}
 
if (error->active_bo)
error->active_bo_count =
capture_active_bo(error->active_bo,
error->active_bo_count,
&dev_priv->mm.active_list);
 
if (error->pinned_bo)
error->pinned_bo_count =
capture_pinned_bo(error->pinned_bo,
error->pinned_bo_count,
&dev_priv->mm.bound_list);
 
do_gettimeofday(&error->time);
 
error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
 
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (dev_priv->first_error == NULL) {
dev_priv->first_error = error;
error = NULL;
}
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
 
if (error)
i915_error_state_free(&error->ref);
}
 
void i915_destroy_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
dev_priv->first_error = NULL;
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
 
if (error)
kref_put(&error->ref, i915_error_state_free);
}
#else
#define i915_capture_error_state(x)
#endif
 
static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t instdone[I915_NUM_INSTDONE_REG];
u32 eir = I915_READ(EIR);
int pipe, i;
 
if (!eir)
return;
 
pr_err("render error detected, EIR: 0x%08x\n", eir);
 
i915_get_extra_instdone(dev, instdone);
 
if (IS_G4X(dev)) {
if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
u32 ipeir = I915_READ(IPEIR_I965);
 
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
for (i = 0; i < ARRAY_SIZE(instdone); i++)
pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
}
if (eir & GM45_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
pr_err("page table error\n");
pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
POSTING_READ(PGTBL_ER);
}
}
 
if (!IS_GEN2(dev)) {
if (eir & I915_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
pr_err("page table error\n");
pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
POSTING_READ(PGTBL_ER);
}
}
 
if (eir & I915_ERROR_MEMORY_REFRESH) {
pr_err("memory refresh error:\n");
for_each_pipe(pipe)
pr_err("pipe %c stat: 0x%08x\n",
pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
/* pipestat has already been acked */
}
if (eir & I915_ERROR_INSTRUCTION) {
pr_err("instruction error\n");
pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
for (i = 0; i < ARRAY_SIZE(instdone); i++)
pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
if (INTEL_INFO(dev)->gen < 4) {
u32 ipeir = I915_READ(IPEIR);
 
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
POSTING_READ(IPEIR);
} else {
u32 ipeir = I915_READ(IPEIR_I965);
 
pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
POSTING_READ(IPEIR_I965);
}
}
 
I915_WRITE(EIR, eir);
POSTING_READ(EIR);
eir = I915_READ(EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
I915_WRITE(EMR, I915_READ(EMR) | eir);
I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
}
}
 
/**
* i915_handle_error - handle an error interrupt
* @dev: drm device
*
* Do some basic checking of register state at error interrupt time and
* dump it to the syslog. Also call i915_capture_error_state() to make
* sure we get a record and make it available in debugfs. Fire a uevent
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int i;
 
i915_capture_error_state(dev);
i915_report_and_clear_eir(dev);
 
if (wedged) {
// INIT_COMPLETION(dev_priv->error_completion);
atomic_set(&dev_priv->mm.wedged, 1);
 
/*
* Wakeup waiting processes so they don't hang
*/
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
}
 
// queue_work(dev_priv->wq, &dev_priv->error_work);
}
 
#if 0
 
 
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj;
struct intel_unpin_work *work;
unsigned long flags;
bool stall_detected;
 
/* Ignore early vblank irqs */
if (intel_crtc == NULL)
return;
 
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
 
if (work == NULL || work->pending || !work->enable_stall_check) {
/* Either the pending flip IRQ arrived, or we're too early. Don't check */
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
 
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
obj->gtt_offset;
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
crtc->y * crtc->fb->pitches[0] +
crtc->x * crtc->fb->bits_per_pixel/8);
}
 
spin_unlock_irqrestore(&dev->event_lock, flags);
 
if (stall_detected) {
DRM_DEBUG_DRIVER("Pageflip stall detected\n");
intel_prepare_page_flip(dev, intel_crtc->plane);
}
}
 
#endif
 
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
 
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
else
i915_enable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE);
 
/* maintain vblank delivery even in deep C-states */
if (dev_priv->info->gen == 3)
I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
}
 
static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
 
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
}
 
static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
 
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_display_irq(dev_priv,
DE_PIPEA_VBLANK_IVB << (5 * pipe));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
}
 
static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
u32 imr;
 
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
imr = I915_READ(VLV_IMR);
if (pipe == 0)
imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
else
imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
I915_WRITE(VLV_IMR, imr);
i915_enable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
return 0;
}
 
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (dev_priv->info->gen == 3)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
 
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
PIPE_START_VBLANK_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_disable_display_irq(dev_priv,
DE_PIPEA_VBLANK_IVB << (pipe * 5));
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
u32 imr;
 
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_disable_pipestat(dev_priv, pipe,
PIPE_START_VBLANK_INTERRUPT_ENABLE);
imr = I915_READ(VLV_IMR);
if (pipe == 0)
imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
else
imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
I915_WRITE(VLV_IMR, imr);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
 
static u32
ring_last_seqno(struct intel_ring_buffer *ring)
{
return list_entry(ring->request_list.prev,
struct drm_i915_gem_request, list)->seqno;
}
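/* (ring_last_seqno() above reads the newest request's seqno: request_list.prev
 * is the tail of the list, so the entry is peeked without being dequeued.) */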

/* drm_dma.h hooks */
static void ironlake_irq_preinstall(struct drm_device *dev)
234,25 → 1508,8
 
atomic_set(&dev_priv->irq_received, 0);
 
// INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
// INIT_WORK(&dev_priv->error_work, i915_error_work_func);
// if (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
// INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
 
I915_WRITE(HWSTAM, 0xeffe);
 
if (IS_GEN6(dev)) {
/* Workaround stalls observed on Sandy Bridge GPUs by
* making the blitter command streamer generate a
* write to the Hardware Status Page for
* MI_USER_INTERRUPT. This appears to serialize the
* previous seqno write out before the interrupt
* happens.
*/
I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
}
 
/* XXX hotplug from PCH */
 
I915_WRITE(DEIMR, 0xffffffff);
270,6 → 1527,38
POSTING_READ(SDEIER);
}
 
static void valleyview_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
 
atomic_set(&dev_priv->irq_received, 0);
 
/* VLV magic */
I915_WRITE(VLV_IMR, 0);
I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
 
/* and GT */
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
POSTING_READ(GTIER);
 
I915_WRITE(DPINVGTT, 0xff);
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IMR, 0xffffffff);
I915_WRITE(VLV_IER, 0x0);
POSTING_READ(VLV_IER);
}
 
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
* duration to 2ms (which is the minimum in the Display Port spec)
299,13 → 1588,6
u32 render_irqs;
u32 hotplug_mask;
 
DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
if (HAS_BSD(dev))
DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
if (HAS_BLT(dev))
DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
 
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
dev_priv->irq_mask = ~display_mask;
 
/* should always be able to generate an irq */
322,8 → 1604,8
if (IS_GEN6(dev))
render_irqs =
GT_USER_INTERRUPT |
GT_GEN6_BSD_USER_INTERRUPT |
GT_BLT_USER_INTERRUPT;
GEN6_BSD_USER_INTERRUPT |
GEN6_BLITTER_USER_INTERRUPT;
else
render_irqs =
GT_USER_INTERRUPT |
352,7 → 1634,7
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);
 
ironlake_enable_pch_hotplug(dev);
// ironlake_enable_pch_hotplug(dev);
 
if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
364,11 → 1646,828
return 0;
}
 
static int ivybridge_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
u32 display_mask =
DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
DE_PLANEA_FLIP_DONE_IVB;
u32 render_irqs;
u32 hotplug_mask;
 
dev_priv->irq_mask = ~display_mask;
 
/* should always be able to generate an irq */
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
I915_WRITE(DEIER,
display_mask |
DE_PIPEC_VBLANK_IVB |
DE_PIPEB_VBLANK_IVB |
DE_PIPEA_VBLANK_IVB);
POSTING_READ(DEIER);
 
dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
 
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
 
hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
dev_priv->pch_irq_mask = ~hotplug_mask;
 
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);
 
// ironlake_enable_pch_hotplug(dev);
 
return 0;
}
 
static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u16 msid;
 
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 
/*
* Leave vblank interrupts masked initially. enable/disable will
* toggle them based on usage.
*/
dev_priv->irq_mask = (~enable_mask) |
I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
 
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
 
/* Hack for broken MSIs on VLV */
pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
pci_read_config_word(dev->pdev, 0x98, &msid);
msid &= 0xff; /* mask out delivery bits */
msid |= (1<<14);
pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
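/* For context (assumption, not from this diff): 0xfee00000 is the x86 LAPIC
 * MSI address window, so 0x94/0x98 are presumably VLV's MSI address/data
 * registers in config space, re-pointed here to work around the broken
 * default routing. */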
 
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(PIPESTAT(0), 0xffff);
I915_WRITE(PIPESTAT(1), 0xffff);
POSTING_READ(VLV_IER);
 
i915_enable_pipestat(dev_priv, 0, pipestat_enable);
i915_enable_pipestat(dev_priv, 1, pipestat_enable);
 
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
 
dev_priv->gt_irq_mask = ~0;
 
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
GT_GEN6_BLT_CS_ERROR_INTERRUPT |
GT_GEN6_BLT_USER_INTERRUPT |
GT_GEN6_BSD_USER_INTERRUPT |
GT_GEN6_BSD_CS_ERROR_INTERRUPT |
GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
GT_PIPE_NOTIFY |
GT_RENDER_CS_ERROR_INTERRUPT |
GT_SYNC_STATUS |
GT_USER_INTERRUPT);
POSTING_READ(GTIER);
 
/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif
 
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */
/* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
#endif
 
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
 
return 0;
}
 
 
static void valleyview_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
 
if (!dev_priv)
return;
 
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
 
I915_WRITE(HWSTAM, 0xffffffff);
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IMR, 0xffffffff);
I915_WRITE(VLV_IER, 0x0);
POSTING_READ(VLV_IER);
}
 
static void ironlake_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
if (!dev_priv)
return;
 
I915_WRITE(HWSTAM, 0xffffffff);
 
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
I915_WRITE(DEIIR, I915_READ(DEIIR));
 
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
I915_WRITE(GTIIR, I915_READ(GTIIR));
 
I915_WRITE(SDEIMR, 0xffffffff);
I915_WRITE(SDEIER, 0x0);
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
}
 
#if 0
 
static void i8xx_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
 
atomic_set(&dev_priv->irq_received, 0);
 
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE16(IMR, 0xffff);
I915_WRITE16(IER, 0x0);
POSTING_READ16(IER);
}
 
static int i8xx_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
 
I915_WRITE16(EMR,
~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
I915_WRITE16(IMR, dev_priv->irq_mask);
 
I915_WRITE16(IER,
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
I915_USER_INTERRUPT);
POSTING_READ16(IER);
 
return 0;
}
 
 
static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u16 iir, new_iir;
u32 pipe_stats[2];
unsigned long irqflags;
int irq_received;
int pipe;
u16 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
 
atomic_inc(&dev_priv->irq_received);
 
iir = I915_READ16(IIR);
if (iir == 0)
return IRQ_NONE;
 
while (iir & ~flip_mask) {
/* Can't rely on pipestat interrupt bit in iir as it might
* have been cleared after the pipestat interrupt was received.
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
 
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg);
 
/*
* Clear the PIPE*STAT regs before the IIR
*/
if (pipe_stats[pipe] & 0x8000ffff) {
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
irq_received = 1;
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
I915_WRITE16(IIR, iir & ~flip_mask);
new_iir = I915_READ16(IIR); /* Flush posted writes */
 
i915_update_dri1_breadcrumb(dev);
 
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
 
if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
drm_handle_vblank(dev, 0)) {
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 0);
intel_finish_page_flip(dev, 0);
flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
}
}
 
if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
drm_handle_vblank(dev, 1)) {
if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 1);
intel_finish_page_flip(dev, 1);
flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
}
}
 
iir = new_iir;
}
 
return IRQ_HANDLED;
}
 
static void i8xx_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
 
for_each_pipe(pipe) {
/* Clear enable bits; then clear status bits */
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
}
I915_WRITE16(IMR, 0xffff);
I915_WRITE16(IER, 0x0);
I915_WRITE16(IIR, I915_READ16(IIR));
}
 
#endif
 
static void i915_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
 
atomic_set(&dev_priv->irq_received, 0);
 
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
 
I915_WRITE16(HWSTAM, 0xeffe);
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
POSTING_READ(IER);
}
 
static int i915_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
 
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
 
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
 
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask =
~(I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
 
enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
#if 0
if (I915_HAS_HOTPLUG(dev)) {
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
#endif
 
I915_WRITE(IMR, dev_priv->irq_mask);
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
 
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
#if 0
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
#endif
/* Ignore TV since it's buggy */
 
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
 
// intel_opregion_enable_asle(dev);
 
return 0;
}
 
static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
unsigned long irqflags;
u32 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
u32 flip[2] = {
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
};
int pipe, ret = IRQ_NONE;
 
atomic_inc(&dev_priv->irq_received);
 
iir = I915_READ(IIR);
do {
bool irq_received = (iir & ~flip_mask) != 0;
bool blc_event = false;
 
/* Can't rely on pipestat interrupt bit in iir as it might
* have been cleared after the pipestat interrupt was received.
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
 
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg);
 
/* Clear the PIPE*STAT regs before the IIR */
if (pipe_stats[pipe] & 0x8000ffff) {
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
irq_received = true;
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
if (!irq_received)
break;
 
/* Consume port. Then clear IIR or we'll miss events */
if ((I915_HAS_HOTPLUG(dev)) &&
(iir & I915_DISPLAY_PORT_INTERRUPT)) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
// if (hotplug_status & dev_priv->hotplug_supported_mask)
// queue_work(dev_priv->wq,
// &dev_priv->hotplug_work);
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
POSTING_READ(PORT_HOTPLUG_STAT);
}
 
I915_WRITE(IIR, iir & ~flip_mask);
new_iir = I915_READ(IIR); /* Flush posted writes */
 
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
 
for_each_pipe(pipe) {
int plane = pipe;
if (IS_MOBILE(dev))
plane = !plane;
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
drm_handle_vblank(dev, pipe)) {
if (iir & flip[plane]) {
// intel_prepare_page_flip(dev, plane);
// intel_finish_page_flip(dev, pipe);
flip_mask &= ~flip[plane];
}
}
 
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
}
 
// if (blc_event || (iir & I915_ASLE_INTERRUPT))
// intel_opregion_asle_intr(dev);
 
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
* set while we were handling the existing iir bits, then
* we would never get another interrupt.
*
* This is fine on non-MSI as well, as if we hit this path
* we avoid exiting the interrupt handler only to generate
* another one.
*
* Note that for MSI this could cause a stray interrupt report
* if an interrupt landed in the time between writing IIR and
* the posting read. This should be rare enough to never
* trigger the 99% of 100,000 interrupts test for disabling
* stray interrupts.
*/
ret = IRQ_HANDLED;
iir = new_iir;
} while (iir & ~flip_mask);
 
i915_update_dri1_breadcrumb(dev);
 
return ret;
}
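/* The IIR handshake used above (and again in i965_irq_handler below) boils
 * down to this read/ack/re-read pattern -- sketch only, names as in this file:
 *
 *	iir = I915_READ(IIR);
 *	do {
 *		I915_WRITE(IIR, iir);		-- ack, re-arming the MSI edge
 *		new_iir = I915_READ(IIR);	-- catch bits that raced in
 *		... service the bits recorded in iir ...
 *		iir = new_iir;
 *	} while (iir);
 */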
 
static void i915_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
 
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
}
 
I915_WRITE16(HWSTAM, 0xffff);
for_each_pipe(pipe) {
/* Clear enable bits; then clear status bits */
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
}
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
 
I915_WRITE(IIR, I915_READ(IIR));
}
 
static void i965_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
 
atomic_set(&dev_priv->irq_received, 0);
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
I915_WRITE(HWSTAM, 0xeffe);
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
POSTING_READ(IER);
}
 
static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en;
u32 enable_mask;
u32 error_mask;
 
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
I915_DISPLAY_PORT_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
 
enable_mask = ~dev_priv->irq_mask;
enable_mask |= I915_USER_INTERRUPT;
 
if (IS_G4X(dev))
enable_mask |= I915_BSD_USER_INTERRUPT;
 
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
 
/*
* Enable some error detection, note the instruction error mask
* bit is reserved, so we leave it masked.
*/
if (IS_G4X(dev)) {
error_mask = ~(GM45_ERROR_PAGE_TABLE |
GM45_ERROR_MEM_PRIV |
GM45_ERROR_CP_PRIV |
I915_ERROR_MEMORY_REFRESH);
} else {
error_mask = ~(I915_ERROR_PAGE_TABLE |
I915_ERROR_MEMORY_REFRESH);
}
I915_WRITE(EMR, error_mask);
 
I915_WRITE(IMR, dev_priv->irq_mask);
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
 
/* Note HDMI and DP share hotplug bits */
hotplug_en = 0;
#if 0
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (IS_G4X(dev)) {
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
} else {
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
}
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
 
/* Programming the CRT detection parameters tends
 * to generate a spurious hotplug event about three
 * seconds later. So just do it once.
 */
if (IS_G4X(dev))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
#endif
/* Ignore TV since it's buggy */
 
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
 
// intel_opregion_enable_asle(dev);
 
return 0;
}
 
static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 iir, new_iir;
u32 pipe_stats[I915_MAX_PIPES];
unsigned long irqflags;
int irq_received;
int ret = IRQ_NONE, pipe;
 
atomic_inc(&dev_priv->irq_received);
 
iir = I915_READ(IIR);
 
for (;;) {
bool blc_event = false;
 
irq_received = iir != 0;
 
/* Can't rely on pipestat interrupt bit in iir as it might
* have been cleared after the pipestat interrupt was received.
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
i915_handle_error(dev, false);
 
for_each_pipe(pipe) {
int reg = PIPESTAT(pipe);
pipe_stats[pipe] = I915_READ(reg);
 
/*
* Clear the PIPE*STAT regs before the IIR
*/
if (pipe_stats[pipe] & 0x8000ffff) {
if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
irq_received = 1;
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
if (!irq_received)
break;
 
ret = IRQ_HANDLED;
 
/* Consume port. Then clear IIR or we'll miss events */
if (iir & I915_DISPLAY_PORT_INTERRUPT) {
u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
// if (hotplug_status & dev_priv->hotplug_supported_mask)
// queue_work(dev_priv->wq,
// &dev_priv->hotplug_work);
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
 
I915_WRITE(IIR, iir);
new_iir = I915_READ(IIR); /* Flush posted writes */
 
if (iir & I915_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[RCS]);
if (iir & I915_BSD_USER_INTERRUPT)
notify_ring(dev, &dev_priv->ring[VCS]);
 
// if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
// intel_prepare_page_flip(dev, 0);
 
// if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
// intel_prepare_page_flip(dev, 1);
 
for_each_pipe(pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
drm_handle_vblank(dev, pipe)) {
// i915_pageflip_stall_check(dev, pipe);
// intel_finish_page_flip(dev, pipe);
}
 
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
}
 
 
// if (blc_event || (iir & I915_ASLE_INTERRUPT))
// intel_opregion_asle_intr(dev);
 
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
* set while we were handling the existing iir bits, then
* we would never get another interrupt.
*
* This is fine on non-MSI as well, as if we hit this path
* we avoid exiting the interrupt handler only to generate
* another one.
*
* Note that for MSI this could cause a stray interrupt report
* if an interrupt landed in the time between writing IIR and
* the posting read. This should be rare enough to never
* trigger the 99% of 100,000 interrupts test for disabling
* stray interrupts.
*/
iir = new_iir;
}
 
i915_update_dri1_breadcrumb(dev);
 
return ret;
}
 
static void i965_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
 
if (!dev_priv)
return;
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
 
I915_WRITE(HWSTAM, 0xffffffff);
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
 
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe),
I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
I915_WRITE(IIR, I915_READ(IIR));
}
 
void intel_irq_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
#if 0
if (IS_IVYBRIDGE(dev)) {
// INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
// INIT_WORK(&dev_priv->error_work, i915_error_work_func);
// INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
// INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
 
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
 
// if (drm_core_check_feature(dev, DRIVER_MODESET))
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
// else
// dev->driver->get_vblank_timestamp = NULL;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
 
if (IS_VALLEYVIEW(dev)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
dev->driver->irq_uninstall = valleyview_irq_uninstall;
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
} else if (IS_IVYBRIDGE(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
376,6 → 2475,14
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (IS_HASWELL(dev)) {
/* Share interrupts handling with IVB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ivybridge_irq_postinstall;
dev->driver->irq_uninstall = ironlake_irq_uninstall;
dev->driver->enable_vblank = ivybridge_enable_vblank;
dev->driver->disable_vblank = ivybridge_disable_vblank;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
384,10 → 2491,22
dev->driver->enable_vblank = ironlake_enable_vblank;
dev->driver->disable_vblank = ironlake_disable_vblank;
} else {
dev->driver->irq_preinstall = i915_driver_irq_preinstall;
dev->driver->irq_postinstall = i915_driver_irq_postinstall;
dev->driver->irq_uninstall = i915_driver_irq_uninstall;
dev->driver->irq_handler = i915_driver_irq_handler;
if (INTEL_INFO(dev)->gen == 2) {
dev->driver->irq_preinstall = i8xx_irq_preinstall;
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_uninstall;
} else if (INTEL_INFO(dev)->gen == 3) {
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_uninstall;
dev->driver->irq_handler = i915_irq_handler;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_uninstall = i965_irq_uninstall;
dev->driver->irq_handler = i965_irq_handler;
}
dev->driver->enable_vblank = i915_enable_vblank;
dev->driver->disable_vblank = i915_disable_vblank;
}
/drivers/video/drm/i915/i915_reg.h
27,6 → 27,11
 
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
 
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
 
#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a))
#define _MASKED_BIT_DISABLE(a) ((a) << 16)
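/* Example (sketch): _PIPE() linearly interpolates between the pipe A and
 * pipe B register offsets, and _MASKED_BIT_*() targets registers whose high
 * 16 bits are per-bit write enables, so no read-modify-write is needed:
 */
#if 0
u32 stat = PIPESTAT(1);	/* == _PIPEASTAT + 1*(_PIPEBSTAT - _PIPEASTAT) */
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(1 << 4));	/* writes 0x00100010 */
#endif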
 
/*
* The Bridge device's PCI config space has information about the
* fb aperture size and the amount of pre-reserved memory.
77,6 → 82,7
#define GRDOM_FULL (0<<2)
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
#define GRDOM_RESET_ENABLE (1<<0)
 
#define GEN6_MBCUNIT_SNPCR 0x900c /* for LLC config */
#define GEN6_MBC_SNPCR_SHIFT 21
86,6 → 92,13
#define GEN6_MBC_SNPCR_LOW (2<<21)
#define GEN6_MBC_SNPCR_MIN (3<<21) /* only 1/16th of the cache is shared */
 
#define GEN6_MBCTL 0x0907c
#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
#define GEN6_MBCTL_BME_UPDATE_ENABLE (1 << 2)
#define GEN6_MBCTL_MAE_UPDATE_ENABLE (1 << 1)
#define GEN6_MBCTL_BOOT_FETCH_MECH (1 << 0)
 
#define GEN6_GDRST 0x941c
#define GEN6_GRDOM_FULL (1 << 0)
#define GEN6_GRDOM_RENDER (1 << 1)
92,6 → 105,40
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
 
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
 
#define GEN6_PDE_VALID (1 << 0)
#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
 
#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_CACHE_BITS (3 << 1)
#define GEN6_PTE_GFDT (1 << 3)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
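/* Sketch of how these combine into a single gen6 PTE: the low 32 bits carry
 * the page address (31:12) plus flags, while physical address bits 39:32 are
 * folded into bits 11:4 by GEN6_GTT_ADDR_ENCODE (addr >> 28 lands bit 32 at
 * bit 4, bit 39 at bit 11). Illustrative only:
 */
#if 0
u32 pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_CACHE_LLC | GEN6_PTE_VALID;
#endif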
 
#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
#define PP_DIR_DCLV_2G 0xffffffff
 
#define GAM_ECOCHK 0x4090
#define ECOCHK_SNB_BIT (1<<10)
#define ECOCHK_PPGTT_CACHE64B (0x3<<3)
#define ECOCHK_PPGTT_CACHE4B (0x0<<3)
 
#define GAC_ECO_BITS 0x14090
#define ECOBITS_PPGTT_CACHE64B (3<<8)
#define ECOBITS_PPGTT_CACHE4B (0<<8)
 
#define GAB_CTL 0x24000
#define GAB_CTL_CONT_AFTER_PAGEFAULT (1<<8)
 
/* VGA stuff */
 
#define VGA_ST01_MDA 0x3ba
164,6 → 211,17
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
/* IVB has funny definitions for which plane to flip. */
#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
#define MI_ARB_ENABLE (1<<0)
#define MI_ARB_DISABLE (0<<0)
 
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
#define MI_MM_SPACE_PHYSICAL (0<<8)
189,6 → 247,7
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
#define MI_SEMAPHORE_UPDATE (1<<21)
244,6 → 303,7
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
#define PIPE_CONTROL_WRITE_FLUSH (1<<12)
268,7 → 328,62
#define DEBUG_RESET_RENDER (1<<8)
#define DEBUG_RESET_DISPLAY (1<<9)
 
/*
* DPIO - a special bus for various display related registers to hide behind:
* 0x800c: m1, m2, n, p1, p2, k dividers
* 0x8014: REF and SFR select
* 0x8014: N divider, VCO select
* 0x801c/3c: core clock bits
* 0x8048/68: low pass filter coefficients
* 0x8100: fast clock controls
*/
#define DPIO_PKT 0x2100
#define DPIO_RID (0<<24)
#define DPIO_OP_WRITE (1<<16)
#define DPIO_OP_READ (0<<16)
#define DPIO_PORTID (0x12<<8)
#define DPIO_BYTE (0xf<<4)
#define DPIO_BUSY (1<<0) /* status only */
#define DPIO_DATA 0x2104
#define DPIO_REG 0x2108
#define DPIO_CTL 0x2110
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
#define DPIO_SFR_BYPASS (1<<1)
#define DPIO_RESET (1<<0)
 
#define _DPIO_DIV_A 0x800c
#define DPIO_POST_DIV_SHIFT (28) /* 3 bits */
#define DPIO_K_SHIFT (24) /* 4 bits */
#define DPIO_P1_SHIFT (21) /* 3 bits */
#define DPIO_P2_SHIFT (16) /* 5 bits */
#define DPIO_N_SHIFT (12) /* 4 bits */
#define DPIO_ENABLE_CALIBRATION (1<<11)
#define DPIO_M1DIV_SHIFT (8) /* 3 bits */
#define DPIO_M2DIV_MASK 0xff
#define _DPIO_DIV_B 0x802c
#define DPIO_DIV(pipe) _PIPE(pipe, _DPIO_DIV_A, _DPIO_DIV_B)
 
#define _DPIO_REFSFR_A 0x8014
#define DPIO_REFSEL_OVERRIDE 27
#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
#define _DPIO_REFSFR_B 0x8034
#define DPIO_REFSFR(pipe) _PIPE(pipe, _DPIO_REFSFR_A, _DPIO_REFSFR_B)
 
#define _DPIO_CORE_CLK_A 0x801c
#define _DPIO_CORE_CLK_B 0x803c
#define DPIO_CORE_CLK(pipe) _PIPE(pipe, _DPIO_CORE_CLK_A, _DPIO_CORE_CLK_B)
 
#define _DPIO_LFP_COEFF_A 0x8048
#define _DPIO_LFP_COEFF_B 0x8068
#define DPIO_LFP_COEFF(pipe) _PIPE(pipe, _DPIO_LFP_COEFF_A, _DPIO_LFP_COEFF_B)
 
#define DPIO_FASTCLK_DISABLE 0x8100
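/* A minimal sketch of the expected DPIO handshake, assuming it follows the
 * upstream Linux driver (helper name here is illustrative): data and target
 * register are latched first, then a packet write kicks the transaction and
 * DPIO_BUSY is polled until it clears:
 */
#if 0
static void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val)
{
	I915_WRITE(DPIO_DATA, val);
	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID | DPIO_BYTE);
	while (I915_READ(DPIO_PKT) & DPIO_BUSY)
		; /* a real implementation would time out here */
}
#endif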
 
/*
* Fence registers
*/
295,6 → 410,12
#define FENCE_REG_SANDYBRIDGE_0 0x100000
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
 
/* control register for cpu gtt access */
#define TILECTL 0x101000
#define TILECTL_SWZCTL (1 << 0)
#define TILECTL_TLB_PREFETCH_DIS (1 << 2)
#define TILECTL_BACKSNOOP_DIS (1 << 3)
 
/*
* Instruction and interrupt control regs
*/
318,12 → 439,18
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
#define ARB_MODE 0x04030
#define ARB_MODE_SWIZZLE_SNB (1<<4)
#define ARB_MODE_SWIZZLE_IVB (1<<5)
#define RENDER_HWS_PGA_GEN7 (0x04080)
#define RING_FAULT_REG(ring) (0x4094 + 0x100*(ring)->id)
#define DONE_REG 0x40b0
#define BSD_HWS_PGA_GEN7 (0x04180)
#define BLT_HWS_PGA_GEN7 (0x04280)
#define RING_ACTHD(base) ((base)+0x74)
#define RING_NOPID(base) ((base)+0x94)
#define RING_IMR(base) ((base)+0xa8)
#define RING_TIMESTAMP(base) ((base)+0x358)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
352,6 → 479,17
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
#define GEN7_INSTDONE_1 0x0206c
#define GEN7_SC_INSTDONE 0x07100
#define GEN7_SAMPLER_INSTDONE 0x0e160
#define GEN7_ROW_INSTDONE 0x0e164
#define I915_NUM_INSTDONE_REG 4
#define RING_IPEIR(base) ((base)+0x64)
#define RING_IPEHR(base) ((base)+0x68)
#define RING_INSTDONE(base) ((base)+0x6c)
#define RING_INSTPS(base) ((base)+0x70)
#define RING_DMA_FADD(base) ((base)+0x78)
#define RING_INSTPM(base) ((base)+0xc0)
#define INSTPS 0x02070 /* 965+ only */
#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
365,16 → 503,11
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
#define VCS_INSTDONE 0x1206C
#define VCS_IPEIR 0x12064
#define VCS_IPEHR 0x12068
#define VCS_ACTHD 0x12074
#define BCS_INSTDONE 0x2206C
#define BCS_IPEIR 0x22064
#define BCS_IPEHR 0x22068
#define BCS_ACTHD 0x22074
#define DMA_FADD_I8XX 0x020d0
 
#define ERROR_GEN6 0x040a0
#define GEN7_ERR_INT 0x44040
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
 
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
388,13 → 521,18
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
 
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 11)
# define MI_FLUSH_ENABLE (1 << 12)
 
#define GEN6_GT_MODE 0x20d0
#define GEN6_GT_MODE_HI (1 << 9)
 
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
#define GFX_RUN_LIST_ENABLE (1<<15)
#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
#define GFX_SURFACE_FAULT_ENABLE (1<<12)
402,8 → 540,7
#define GFX_PSMI_GRANULARITY (1<<10)
#define GFX_PPGTT_ENABLE (1<<9)
 
#define GFX_MODE_ENABLE(bit) (((bit) << 16) | (bit))
#define GFX_MODE_DISABLE(bit) (((bit) << 16) | (0))
#define VLV_DISPLAY_BASE 0x180000
 
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
410,6 → 547,11
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
#define VLV_IIR_RW 0x182084
#define VLV_IER 0x1820a0
#define VLV_IIR 0x1820a4
#define VLV_IMR 0x1820a8
#define VLV_ISR 0x1820ac
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
455,7 → 597,6
#define LM_BURST_LENGTH 0x00000700
#define LM_FIFO_WATERMARK 0x0000001F
#define MI_ARB_STATE 0x020e4 /* 915+ only */
#define MI_ARB_MASK_SHIFT 16 /* shift for enable bits */
 
/* Make render/texture TLB fetches lower priority than associated data
* fetches. This is not turned on by default
520,9 → 661,9
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
 
#define CACHE_MODE_0 0x02120 /* 915+ only */
#define CM0_MASK_SHIFT 16
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
#define CM0_DEPTH_EVICT_DISABLE (1<<4)
#define CM0_COLOR_EVICT_DISABLE (1<<3)
#define CM0_DEPTH_WRITE_DISABLE (1<<1)
533,7 → 674,12
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
 
/* GEN6 interrupt control */
#define CACHE_MODE_1 0x7004 /* IVB+ */
#define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6)
 
/* GEN6 interrupt control
* Note that the per-ring interrupt bits do alias with the global interrupt bits
* in GTIMR. */
#define GEN6_RENDER_HWSTAM 0x2098
#define GEN6_RENDER_IMR 0x20a8
#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
558,10 → 704,10
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
 
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK (1 << 16)
#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0)
#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
#define GEN6_BSD_SLEEP_INDICATOR (1 << 3)
#define GEN6_BSD_GO_INDICATOR (1 << 4)
 
#define GEN6_BSD_HWSTAM 0x12098
#define GEN6_BSD_IMR 0x120a8
569,6 → 715,21
 
#define GEN6_BSD_RNCID 0x12198
 
#define GEN7_FF_THREAD_MODE 0x20a0
#define GEN7_FF_SCHED_MASK 0x0077070
#define GEN7_FF_TS_SCHED_HS1 (0x5<<16)
#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
#define GEN7_FF_VS_SCHED_HW (0x0<<12)
#define GEN7_FF_DS_SCHED_HS1 (0x5<<4)
#define GEN7_FF_DS_SCHED_HS0 (0x3<<4)
#define GEN7_FF_DS_SCHED_LOAD_BALANCE (0x1<<4) /* Default */
#define GEN7_FF_DS_SCHED_HW (0x0<<4)
 
/*
* Framebuffer compression (915+ only)
*/
697,9 → 858,9
#define GMBUS_PORT_PANEL 3
#define GMBUS_PORT_DPC 4 /* HDMIC */
#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
/* 6 reserved */
#define GMBUS_PORT_DPD 7 /* HDMID */
#define GMBUS_NUM_PORTS 8
#define GMBUS_PORT_DPD 6 /* HDMID */
#define GMBUS_PORT_RESERVED 7 /* 7 reserved */
#define GMBUS_NUM_PORTS (GMBUS_PORT_DPD - GMBUS_PORT_SSC + 1)
#define GMBUS1 0x5104 /* command/status */
#define GMBUS_SW_CLR_INT (1<<31)
#define GMBUS_SW_RDY (1<<30)
751,7 → 912,9
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
#define DPLL_VGA_MODE_DIS (1 << 28)
#define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */
#define DPLLB_MODE_LVDS (2 << 26) /* i915 */
763,6 → 926,8
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
 
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
858,6 → 1023,7
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define _DPLL_B_MD 0x06020 /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
 
#define _FPA0 0x06040
#define _FPA1 0x06044
#define _FPB0 0x06048
998,6 → 1164,9
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
 
#define FW_BLC_SELF_VLV 0x6500
#define FW_CSPWRDWNEN (1<<15)
 
/*
* Palette regs
*/
1037,6 → 1206,29
#define C0DRB3 0x10206
#define C1DRB3 0x10606
 
/** snb MCH registers for reading the DRAM channel configuration */
#define MAD_DIMM_C0 (MCHBAR_MIRROR_BASE_SNB + 0x5004)
#define MAD_DIMM_C1 (MCHBAR_MIRROR_BASE_SNB + 0x5008)
#define MAD_DIMM_C2 (MCHBAR_MIRROR_BASE_SNB + 0x500C)
#define MAD_DIMM_ECC_MASK (0x3 << 24)
#define MAD_DIMM_ECC_OFF (0x0 << 24)
#define MAD_DIMM_ECC_IO_ON_LOGIC_OFF (0x1 << 24)
#define MAD_DIMM_ECC_IO_OFF_LOGIC_ON (0x2 << 24)
#define MAD_DIMM_ECC_ON (0x3 << 24)
#define MAD_DIMM_ENH_INTERLEAVE (0x1 << 22)
#define MAD_DIMM_RANK_INTERLEAVE (0x1 << 21)
#define MAD_DIMM_B_WIDTH_X16 (0x1 << 20) /* X8 chips if unset */
#define MAD_DIMM_A_WIDTH_X16 (0x1 << 19) /* X8 chips if unset */
#define MAD_DIMM_B_DUAL_RANK (0x1 << 18)
#define MAD_DIMM_A_DUAL_RANK (0x1 << 17)
#define MAD_DIMM_A_SELECT (0x1 << 16)
/* DIMM sizes are in multiples of 256mb. */
#define MAD_DIMM_B_SIZE_SHIFT 8
#define MAD_DIMM_B_SIZE_MASK (0xff << MAD_DIMM_B_SIZE_SHIFT)
#define MAD_DIMM_A_SIZE_SHIFT 0
#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
 
 
/* Clocking configuration register */
#define CLKCFG 0x10c00
#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
1280,6 → 1472,10
#define DDRMPLL1 0X12c20
#define PEG_BAND_GAP_DATA 0x14d68
 
#define GEN6_GT_THREAD_STATUS_REG 0x13805c
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
 
#define GEN6_GT_PERF_STATUS 0x145948
#define GEN6_RP_STATE_LIMITS 0x145994
#define GEN6_RP_STATE_CAP 0x145998
1289,6 → 1485,39
*/
#define CCID 0x2180
#define CCID_EN (1<<0)
#define CXT_SIZE 0x21a0
#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \
GEN6_CXT_RING_SIZE(cxt_reg) + \
GEN6_CXT_RENDER_SIZE(cxt_reg) + \
GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
GEN6_CXT_PIPELINE_SIZE(cxt_reg))
#define GEN7_CXT_SIZE 0x21a8
#define GEN7_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 25) & 0x7f)
#define GEN7_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 22) & 0x7)
#define GEN7_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 16) & 0x3f)
#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \
GEN7_CXT_RING_SIZE(ctx_reg) + \
GEN7_CXT_RENDER_SIZE(ctx_reg) + \
GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
GEN7_CXT_GT1_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
#define HSW_CXT_POWER_SIZE(ctx_reg) ((ctx_reg >> 26) & 0x3f)
#define HSW_CXT_RING_SIZE(ctx_reg) ((ctx_reg >> 23) & 0x7)
#define HSW_CXT_RENDER_SIZE(ctx_reg) ((ctx_reg >> 15) & 0xff)
#define HSW_CXT_TOTAL_SIZE(ctx_reg) (HSW_CXT_POWER_SIZE(ctx_reg) + \
HSW_CXT_RING_SIZE(ctx_reg) + \
HSW_CXT_RENDER_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
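/* Usage sketch, assuming this follows the upstream driver: the context-size
 * fields appear to count 64-byte cachelines, so the summed total is scaled
 * by 64 to get the logical context image size in bytes:
 */
#if 0
u32 reg = I915_READ(CXT_SIZE);		/* GEN7_CXT_SIZE on gen7/HSW */
int ctx_size = GEN6_CXT_TOTAL_SIZE(reg) * 64;
#endif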
 
 
/*
* Overlay regs
*/
1316,6 → 1545,7
#define _VSYNC_A 0x60014
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
#define _VSYNCSHIFT_A 0x60028
 
/* Pipe B timing regs */
#define _HTOTAL_B 0x61000
1326,7 → 1556,9
#define _VSYNC_B 0x61014
#define _PIPEBSRC 0x6101c
#define _BCLRPAT_B 0x61020
#define _VSYNCSHIFT_B 0x61028
 
 
#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
1334,9 → 1566,13
#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 
/* VGA port control */
#define ADPA 0x61100
#define PCH_ADPA 0xe1100
#define VLV_ADPA (VLV_DISPLAY_BASE + ADPA)
 
#define ADPA_DAC_ENABLE (1<<31)
#define ADPA_DAC_DISABLE 0
#define ADPA_PIPE_SELECT_MASK (1<<30)
1343,6 → 1579,26
#define ADPA_PIPE_A_SELECT 0
#define ADPA_PIPE_B_SELECT (1<<30)
#define ADPA_PIPE_SELECT(pipe) ((pipe) << 30)
/* CPT uses bits 29:30 for pch transcoder select */
#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
1389,12 → 1645,21
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
 
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
#define DPB_HOTPLUG_INT_STATUS (1 << 29)
#define HDMIC_HOTPLUG_INT_STATUS (1 << 28)
#define DPC_HOTPLUG_INT_STATUS (1 << 28)
#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
#define DPD_HOTPLUG_INT_STATUS (1 << 27)
/* HDMI/DP bits are gen4+ */
#define DPB_HOTPLUG_LIVE_STATUS (1 << 29)
#define DPC_HOTPLUG_LIVE_STATUS (1 << 28)
#define DPD_HOTPLUG_LIVE_STATUS (1 << 27)
#define DPD_HOTPLUG_INT_STATUS (3 << 21)
#define DPC_HOTPLUG_INT_STATUS (3 << 19)
#define DPB_HOTPLUG_INT_STATUS (3 << 17)
/* HDMI bits are shared with the DP bits */
#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29)
#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28)
#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27)
#define HDMID_HOTPLUG_INT_STATUS (3 << 21)
#define HDMIC_HOTPLUG_INT_STATUS (3 << 19)
#define HDMIB_HOTPLUG_INT_STATUS (3 << 17)
/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
1401,8 → 1666,13
#define CRT_HOTPLUG_MONITOR_COLOR (3 << 8)
#define CRT_HOTPLUG_MONITOR_MONO (2 << 8)
#define CRT_HOTPLUG_MONITOR_NONE (0 << 8)
#define SDVOC_HOTPLUG_INT_STATUS (1 << 7)
#define SDVOB_HOTPLUG_INT_STATUS (1 << 6)
/* SDVO is different across gen3/4 */
#define SDVOC_HOTPLUG_INT_STATUS_G4X (1 << 3)
#define SDVOB_HOTPLUG_INT_STATUS_G4X (1 << 2)
#define SDVOC_HOTPLUG_INT_STATUS_I965 (3 << 4)
#define SDVOB_HOTPLUG_INT_STATUS_I965 (3 << 2)
#define SDVOC_HOTPLUG_INT_STATUS_I915 (1 << 7)
#define SDVOB_HOTPLUG_INT_STATUS_I915 (1 << 6)
 
/* SDVO port control */
#define SDVOB 0x61140
1527,12 → 1797,21
 
/* Video Data Island Packet control */
#define VIDEO_DIP_DATA 0x61178
/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT_B (1 << 29)
#define VIDEO_DIP_PORT_C (2 << 29)
#define VIDEO_DIP_PORT_D (3 << 29)
#define VIDEO_DIP_PORT_MASK (3 << 29)
#define VIDEO_DIP_ENABLE_GCP (1 << 25)
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
#define VIDEO_DIP_ENABLE_GAMUT (4 << 21)
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
#define VIDEO_DIP_SELECT_AVI (0 << 19)
#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
1541,6 → 1820,14
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
#define VIDEO_DIP_FREQ_VSYNC (1 << 16)
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
#define VIDEO_DIP_FREQ_MASK (3 << 16)
/* HSW and later: */
#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
#define VIDEO_DIP_ENABLE_VS_HSW (1 << 8)
#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
 
/* Panel power sequencing */
#define PP_STATUS 0x61200
1611,10 → 1898,26
#define PFIT_AUTO_RATIOS 0x61238
 
/* Backlight control */
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
#define BLM_PWM_ENABLE (1 << 31)
#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
#define BLM_PIPE_SELECT (1 << 29)
#define BLM_PIPE_SELECT_IVB (3 << 29)
#define BLM_PIPE_A (0 << 29)
#define BLM_PIPE_B (1 << 29)
#define BLM_PIPE_C (2 << 29) /* ivb + */
#define BLM_PIPE(pipe) ((pipe) << 29)
#define BLM_POLARITY_I965 (1 << 28) /* gen4 only */
#define BLM_PHASE_IN_INTERUPT_STATUS (1 << 26)
#define BLM_PHASE_IN_ENABLE (1 << 25)
#define BLM_PHASE_IN_INTERUPT_ENABL (1 << 24)
#define BLM_PHASE_IN_TIME_BASE_SHIFT (16)
#define BLM_PHASE_IN_TIME_BASE_MASK (0xff << 16)
#define BLM_PHASE_IN_COUNT_SHIFT (8)
#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
#define BLM_PHASE_IN_INCR_SHIFT (0)
#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
#define BLC_PWM_CTL 0x61254
#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
#define BLM_COMBINATION_MODE (1 << 30)
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
1621,8 → 1924,9
*
* The actual value is this field multiplied by two.
*/
#define BACKLIGHT_MODULATION_FREQ_SHIFT (17)
#define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17)
#define BLM_LEGACY_MODE (1 << 16)
#define BLM_LEGACY_MODE (1 << 16) /* gen2 only */
/*
* This is the number of cycles out of the backlight modulation cycle for which
* the backlight is on.
1632,9 → 1936,24
*/
#define BACKLIGHT_DUTY_CYCLE_SHIFT (0)
#define BACKLIGHT_DUTY_CYCLE_MASK (0xffff)
#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
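/* Worked example of the encoding described above: a BLC_PWM_CTL value of
 * 0x12340567 gives a frequency field of 0x091a (bits 31:17), i.e.
 * 2 * 0x091a = 0x1234 total cycles per modulation period, of which the
 * backlight is on for the duty-cycle value 0x0567. */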
 
#define BLC_HIST_CTL 0x61260
 
/* New registers for PCH-split platforms. Safe where new bits show up, the
 * register layout matches gen4 BLC_PWM_CTL[12]. */
#define BLC_PWM_CPU_CTL2 0x48250
#define BLC_PWM_CPU_CTL 0x48254
 
/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
* like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
#define BLC_PWM_PCH_CTL1 0xc8250
#define BLM_PCH_PWM_ENABLE (1 << 31)
#define BLM_PCH_OVERRIDE_ENABLE (1 << 30)
#define BLM_PCH_POLARITY (1 << 29)
#define BLC_PWM_PCH_CTL2 0xc8254
 
/* TV port control */
#define TV_CTL 0x68000
/** Enables the TV encoder */
2307,12 → 2626,14
 
/* Pipe A */
#define _PIPEADSL 0x70000
#define DSL_LINEMASK 0x00000fff
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
#define I965_PIPECONF_ACTIVE (1<<30)
#define PIPECONF_FRAME_START_DELAY_MASK (3<<27)
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
#define PIPECONF_PIPE_LOCKED (1<<25)
2319,10 → 2640,21
#define PIPECONF_PALETTE 0
#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_INTERLACE_MASK (7 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
* fitting must be disabled on pre-ilk for interlaced. */
#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */
#define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */
/* Ironlake and later have a complete new set of values for interlaced. PFIT
* means panel fitter required, PF means progressive fetch, DBL means power
* saving pixel doubling. */
#define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21)
#define PIPECONF_INTERLACED_ILK (3 << 21)
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_BPP_MASK (0x000000e0)
#define PIPECONF_BPP_8 (0<<5)
2337,13 → 2669,16
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define _PIPEASTAT 0x70024
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
#define PIPE_CRC_DONE_ENABLE (1UL<<28)
#define PIPE_GMBUS_EVENT_ENABLE (1UL<<27)
#define PLANE_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26)
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
2350,10 → 2685,14
#define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */
#define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17)
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
#define PLANE_FLIPDONE_INT_STATUS_VLV (1UL<<10)
#define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10)
#define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9)
#define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
2378,6 → 2717,40
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
 
#define VLV_DPFLIPSTAT 0x70028
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
#define SPRITED_FLIPDONE_INT_EN (1<<26)
#define SPRITEC_FLIPDONE_INT_EN (1<<25)
#define PLANEB_FLIPDONE_INT_EN (1<<24)
#define PIPEA_LINE_COMPARE_INT_EN (1<<21)
#define PIPEA_HLINE_INT_EN (1<<20)
#define PIPEA_VBLANK_INT_EN (1<<19)
#define SPRITEB_FLIPDONE_INT_EN (1<<18)
#define SPRITEA_FLIPDONE_INT_EN (1<<17)
#define PLANEA_FLIPDONE_INT_EN (1<<16)
 
#define DPINVGTT 0x7002c /* VLV only */
#define CURSORB_INVALID_GTT_INT_EN (1<<23)
#define CURSORA_INVALID_GTT_INT_EN (1<<22)
#define SPRITED_INVALID_GTT_INT_EN (1<<21)
#define SPRITEC_INVALID_GTT_INT_EN (1<<20)
#define PLANEB_INVALID_GTT_INT_EN (1<<19)
#define SPRITEB_INVALID_GTT_INT_EN (1<<18)
#define SPRITEA_INVALID_GTT_INT_EN (1<<17)
#define PLANEA_INVALID_GTT_INT_EN (1<<16)
#define DPINVGTT_EN_MASK 0xff0000
#define CURSORB_INVALID_GTT_STATUS (1<<7)
#define CURSORA_INVALID_GTT_STATUS (1<<6)
#define SPRITED_INVALID_GTT_STATUS (1<<5)
#define SPRITEC_INVALID_GTT_STATUS (1<<4)
#define PLANEB_INVALID_GTT_STATUS (1<<3)
#define SPRITEB_INVALID_GTT_STATUS (1<<2)
#define SPRITEA_INVALID_GTT_STATUS (1<<1)
#define PLANEA_INVALID_GTT_STATUS (1<<0)
#define DPINVGTT_STATUS_MASK 0xff
 
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
2407,11 → 2780,28
#define DSPFW_HPLL_CURSOR_MASK (0x3f<<16)
#define DSPFW_HPLL_SR_MASK (0x1ff)
 
/* drain latency register values */
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_16 16
#define VLV_DDL1 0x70050
#define DDL_CURSORA_PRECISION_32 (1<<31)
#define DDL_CURSORA_PRECISION_16 (0<<31)
#define DDL_CURSORA_SHIFT 24
#define DDL_PLANEA_PRECISION_32 (1<<7)
#define DDL_PLANEA_PRECISION_16 (0<<7)
#define VLV_DDL2 0x70054
#define DDL_CURSORB_PRECISION_32 (1<<31)
#define DDL_CURSORB_PRECISION_16 (0<<31)
#define DDL_CURSORB_SHIFT 24
#define DDL_PLANEB_PRECISION_32 (1<<7)
#define DDL_PLANEB_PRECISION_16 (0<<7)
 
/* FIFO watermark sizes etc */
#define G4X_FIFO_LINE_SIZE 64
#define I915_FIFO_LINE_SIZE 64
#define I830_FIFO_LINE_SIZE 32
 
#define VALLEYVIEW_FIFO_SIZE 255
#define G4X_FIFO_SIZE 127
#define I965_FIFO_SIZE 512
#define I945_FIFO_SIZE 127
2419,6 → 2809,7
#define I855GM_FIFO_SIZE 127 /* In cachelines */
#define I830_FIFO_SIZE 95
 
#define VALLEYVIEW_MAX_WM 0xff
#define G4X_MAX_WM 0x3f
#define I915_MAX_WM 0x3f
 
2433,6 → 2824,7
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
 
#define VALLEYVIEW_CURSOR_MAX_WM 64
#define I965_CURSOR_FIFO 64
#define I965_CURSOR_MAX_WM 32
#define I965_CURSOR_DFT_WM 8
2640,7 → 3032,15
#define DSPSIZE(plane) _PIPE(plane, _DSPASIZE, _DSPBSIZE)
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
 
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
#define I915_HI_DISPBASE(val) (val & DISP_BASEADDR_MASK)
#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
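/* Usage sketch (not from the original source): the macro above does a
 * read-modify-write that installs a new page-aligned surface address while
 * preserving the low 12 bits of the current register value, e.g. when
 * flipping a plane to a new surface:
 *
 *	I915_MODIFY_DISPBASE(DSPSURF(plane), surf_gtt_offset);
 *
 * where surf_gtt_offset is a hypothetical page-aligned GTT address.
 */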
 
/* VBIOS flags */
#define SWF00 0x71410
#define SWF01 0x71414
2689,7 → 3089,7
#define DVS_FORMAT_RGBX888 (2<<25)
#define DVS_FORMAT_RGBX161616 (3<<25)
#define DVS_SOURCE_KEY (1<<22)
#define DVS_RGB_ORDER_RGBX (1<<20)
#define DVS_RGB_ORDER_XBGR (1<<20)
#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
#define DVS_YUV_ORDER_YUYV (0<<16)
#define DVS_YUV_ORDER_UYVY (1<<16)
2973,25 → 3373,38
#define DE_PCH_EVENT_IVB (1<<28)
#define DE_DP_A_HOTPLUG_IVB (1<<27)
#define DE_AUX_CHANNEL_A_IVB (1<<26)
#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
#define DE_PIPEC_VBLANK_IVB (1<<10)
#define DE_SPRITEB_FLIP_DONE_IVB (1<<9)
#define DE_PLANEB_FLIP_DONE_IVB (1<<8)
#define DE_PIPEB_VBLANK_IVB (1<<5)
#define DE_SPRITEA_FLIP_DONE_IVB (1<<4)
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEA_VBLANK_IVB (1<<0)
 
#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
#define MASTER_INTERRUPT_ENABLE (1<<31)
 
#define DEISR 0x44000
#define DEIMR 0x44004
#define DEIIR 0x44008
#define DEIER 0x4400c
 
/* GT interrupt.
* Note that for gen6+ the ring-specific interrupt bits do alias with the
* corresponding bits in the per-ring interrupt control registers. */
#define GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT (1 << 26)
#define GT_GEN6_BLT_CS_ERROR_INTERRUPT (1 << 25)
#define GT_GEN6_BLT_USER_INTERRUPT (1 << 22)
#define GT_GEN6_BSD_CS_ERROR_INTERRUPT (1 << 15)
#define GT_GEN6_BSD_USER_INTERRUPT (1 << 12)
#define GT_BSD_USER_INTERRUPT (1 << 5) /* ilk only */
#define GT_GEN7_L3_PARITY_ERROR_INTERRUPT (1 << 5)
#define GT_PIPE_NOTIFY (1 << 4)
#define GT_RENDER_CS_ERROR_INTERRUPT (1 << 3)
#define GT_SYNC_STATUS (1 << 2)
#define GT_USER_INTERRUPT (1 << 0)
 
#define GTISR 0x44010
#define GTIMR 0x44014
3028,9 → 3441,23
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
 
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
 
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
 
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
 
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
 
/* PCH */
 
/* south display engine interrupt: IBX */
#define SDE_AUDIO_POWER_D (1 << 27)
#define SDE_AUDIO_POWER_C (1 << 26)
#define SDE_AUDIO_POWER_B (1 << 25)
3066,15 → 3493,44
#define SDE_TRANSA_CRC_ERR (1 << 1)
#define SDE_TRANSA_FIFO_UNDER (1 << 0)
#define SDE_TRANS_MASK (0x3f)
 
/* south display engine interrupt: CPT/PPT */
#define SDE_AUDIO_POWER_D_CPT (1 << 31)
#define SDE_AUDIO_POWER_C_CPT (1 << 30)
#define SDE_AUDIO_POWER_B_CPT (1 << 29)
#define SDE_AUDIO_POWER_SHIFT_CPT 29
#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
#define SDE_AUXD_CPT (1 << 27)
#define SDE_AUXC_CPT (1 << 26)
#define SDE_AUXB_CPT (1 << 25)
#define SDE_AUX_MASK_CPT (7 << 25)
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
#define SDE_CRT_HOTPLUG_CPT (1 << 19)
#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
SDE_PORTD_HOTPLUG_CPT | \
SDE_PORTC_HOTPLUG_CPT | \
SDE_PORTB_HOTPLUG_CPT)
#define SDE_GMBUS_CPT (1 << 17)
#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
#define SDE_FDI_RXC_CPT (1 << 8)
#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
#define SDE_FDI_RXB_CPT (1 << 4)
#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
#define SDE_FDI_RXA_CPT (1 << 0)
#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
SDE_AUDIO_CP_REQ_B_CPT | \
SDE_AUDIO_CP_REQ_A_CPT)
#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
SDE_AUDIO_CP_CHG_B_CPT | \
SDE_AUDIO_CP_CHG_A_CPT)
#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
SDE_FDI_RXB_CPT | \
SDE_FDI_RXA_CPT)
 
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
3127,7 → 3583,7
 
#define _PCH_DPLL_A 0xc6014
#define _PCH_DPLL_B 0xc6018
#define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
 
#define _PCH_FPA0 0xc6040
#define FP_CB_TUNE (0x3<<22)
3134,8 → 3590,8
#define _PCH_FPA1 0xc6044
#define _PCH_FPB0 0xc6048
#define _PCH_FPB1 0xc604c
#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0)
#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1)
 
#define PCH_DPLL_TEST 0xc606c
 
3205,6 → 3661,7
#define _TRANS_VSYNC_A 0xe0014
#define TRANS_VSYNC_END_SHIFT 16
#define TRANS_VSYNC_START_SHIFT 0
#define _TRANS_VSYNCSHIFT_A 0xe0028
 
#define _TRANSA_DATA_M1 0xe0030
#define _TRANSA_DATA_N1 0xe0034
3229,6 → 3686,57
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
 
#define VLV_VIDEO_DIP_CTL_A 0x60220
#define VLV_VIDEO_DIP_DATA_A 0x60208
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
 
#define VLV_VIDEO_DIP_CTL_B 0x61170
#define VLV_VIDEO_DIP_DATA_B 0x61174
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
 
#define VLV_TVIDEO_DIP_CTL(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
#define VLV_TVIDEO_DIP_DATA(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_DATA_A, VLV_VIDEO_DIP_DATA_B)
#define VLV_TVIDEO_DIP_GCP(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_GDCP_PAYLOAD_A, VLV_VIDEO_DIP_GDCP_PAYLOAD_B)
 
/* Haswell DIP controls */
#define HSW_VIDEO_DIP_CTL_A 0x60200
#define HSW_VIDEO_DIP_AVI_DATA_A 0x60220
#define HSW_VIDEO_DIP_VS_DATA_A 0x60260
#define HSW_VIDEO_DIP_SPD_DATA_A 0x602A0
#define HSW_VIDEO_DIP_GMP_DATA_A 0x602E0
#define HSW_VIDEO_DIP_VSC_DATA_A 0x60320
#define HSW_VIDEO_DIP_AVI_ECC_A 0x60240
#define HSW_VIDEO_DIP_VS_ECC_A 0x60280
#define HSW_VIDEO_DIP_SPD_ECC_A 0x602C0
#define HSW_VIDEO_DIP_GMP_ECC_A 0x60300
#define HSW_VIDEO_DIP_VSC_ECC_A 0x60344
#define HSW_VIDEO_DIP_GCP_A 0x60210
 
#define HSW_VIDEO_DIP_CTL_B 0x61200
#define HSW_VIDEO_DIP_AVI_DATA_B 0x61220
#define HSW_VIDEO_DIP_VS_DATA_B 0x61260
#define HSW_VIDEO_DIP_SPD_DATA_B 0x612A0
#define HSW_VIDEO_DIP_GMP_DATA_B 0x612E0
#define HSW_VIDEO_DIP_VSC_DATA_B 0x61320
#define HSW_VIDEO_DIP_BVI_ECC_B 0x61240
#define HSW_VIDEO_DIP_VS_ECC_B 0x61280
#define HSW_VIDEO_DIP_SPD_ECC_B 0x612C0
#define HSW_VIDEO_DIP_GMP_ECC_B 0x61300
#define HSW_VIDEO_DIP_VSC_ECC_B 0x61344
#define HSW_VIDEO_DIP_GCP_B 0x61210
 
#define HSW_TVIDEO_DIP_CTL(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
#define HSW_TVIDEO_DIP_AVI_DATA(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
#define HSW_TVIDEO_DIP_SPD_DATA(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
#define HSW_TVIDEO_DIP_GCP(pipe) \
_PIPE(pipe, HSW_VIDEO_DIP_GCP_A, HSW_VIDEO_DIP_GCP_B)
 
#define _TRANS_HTOTAL_B 0xe1000
#define _TRANS_HBLANK_B 0xe1004
#define _TRANS_HSYNC_B 0xe1008
3235,6 → 3743,7
#define _TRANS_VTOTAL_B 0xe100c
#define _TRANS_VBLANK_B 0xe1010
#define _TRANS_VSYNC_B 0xe1014
#define _TRANS_VSYNCSHIFT_B 0xe1028
 
#define TRANS_HTOTAL(pipe) _PIPE(pipe, _TRANS_HTOTAL_A, _TRANS_HTOTAL_B)
#define TRANS_HBLANK(pipe) _PIPE(pipe, _TRANS_HBLANK_A, _TRANS_HBLANK_B)
3242,6 → 3751,8
#define TRANS_VTOTAL(pipe) _PIPE(pipe, _TRANS_VTOTAL_A, _TRANS_VTOTAL_B)
#define TRANS_VBLANK(pipe) _PIPE(pipe, _TRANS_VBLANK_A, _TRANS_VBLANK_B)
#define TRANS_VSYNC(pipe) _PIPE(pipe, _TRANS_VSYNC_A, _TRANS_VSYNC_B)
#define TRANS_VSYNCSHIFT(pipe) _PIPE(pipe, _TRANS_VSYNCSHIFT_A, \
_TRANS_VSYNCSHIFT_B)
 
#define _TRANSB_DATA_M1 0xe1030
#define _TRANSB_DATA_N1 0xe1034
3275,7 → 3786,10
#define TRANS_FSYNC_DELAY_HB4 (3<<27)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_VIDEO_AUDIO (0<<26)
#define TRANS_INTERLACE_MASK (7<<21)
#define TRANS_PROGRESSIVE (0<<21)
#define TRANS_INTERLACED (3<<21)
#define TRANS_LEGACY_INTERLACED_ILK (2<<21)
#define TRANS_8BPC (0<<5)
#define TRANS_10BPC (1<<5)
#define TRANS_6BPC (2<<5)
3383,6 → 3897,9
#define FDI_LINK_TRAIN_PATTERN_IDLE_CPT (2<<8)
#define FDI_LINK_TRAIN_NORMAL_CPT (3<<8)
#define FDI_LINK_TRAIN_PATTERN_MASK_CPT (3<<8)
/* LPT */
#define FDI_PORT_WIDTH_2X_LPT (1<<19)
#define FDI_PORT_WIDTH_1X_LPT (0<<19)
 
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
3390,6 → 3907,9
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
#define FDI_RX_TP1_TO_TP2_48 (2<<20)
#define FDI_RX_TP1_TO_TP2_64 (3<<20)
#define FDI_RX_FDI_DELAY_90 (0x90<<0)
#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
3417,31 → 3937,6
#define FDI_PLL_CTL_1 0xfe000
#define FDI_PLL_CTL_2 0xfe004
 
/* CRT */
#define PCH_ADPA 0xe1100
#define ADPA_TRANS_SELECT_MASK (1<<30)
#define ADPA_TRANS_A_SELECT 0
#define ADPA_TRANS_B_SELECT (1<<30)
#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
 
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
3473,21 → 3968,19
#define PCH_LVDS 0xe1180
#define LVDS_DETECTED (1 << 1)
 
#define BLC_PWM_CPU_CTL2 0x48250
#define PWM_ENABLE (1 << 31)
#define PWM_PIPE_A (0 << 29)
#define PWM_PIPE_B (1 << 29)
#define BLC_PWM_CPU_CTL 0x48254
/* vlv has 2 sets of panel control regs. */
#define PIPEA_PP_STATUS 0x61200
#define PIPEA_PP_CONTROL 0x61204
#define PIPEA_PP_ON_DELAYS 0x61208
#define PIPEA_PP_OFF_DELAYS 0x6120c
#define PIPEA_PP_DIVISOR 0x61210
 
#define BLC_PWM_PCH_CTL1 0xc8250
#define PWM_PCH_ENABLE (1 << 31)
#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
#define PIPEB_PP_STATUS 0x61300
#define PIPEB_PP_CONTROL 0x61304
#define PIPEB_PP_ON_DELAYS 0x61308
#define PIPEB_PP_OFF_DELAYS 0x6130c
#define PIPEB_PP_DIVISOR 0x61310
 
#define BLC_PWM_PCH_CTL2 0xc8254
 
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
#define PANEL_UNLOCK_REGS (0xabcd << 16)
3551,6 → 4044,8
#define PORT_TRANS_C_SEL_CPT (2<<29)
#define PORT_TRANS_SEL_MASK (3<<29)
#define PORT_TRANS_SEL_CPT(pipe) ((pipe) << 29)
#define PORT_TO_PIPE(val) (((val) & (1<<30)) >> 30)
#define PORT_TO_PIPE_CPT(val) (((val) & PORT_TRANS_SEL_MASK) >> 29)
 
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
3608,6 → 4103,9
#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
 
#define FORCEWAKE 0xA18C
#define FORCEWAKE_VLV 0x1300b0
#define FORCEWAKE_ACK_VLV 0x1300b4
#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_MT_ACK 0x130040
3614,13 → 4112,29
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
 
#define GTFIFODBG 0x120000
#define GT_FIFO_CPU_ERROR_MASK 7
#define GT_FIFO_OVFERR (1<<2)
#define GT_FIFO_IAWRERR (1<<1)
#define GT_FIFO_IARDERR (1<<0)
 
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
 
#define GEN6_UCGCTL1 0x9400
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
 
#define GEN6_UCGCTL2 0x9404
# define GEN7_VDSUNIT_CLOCK_GATE_DISABLE (1 << 30)
# define GEN7_TDLUNIT_CLOCK_GATE_DISABLE (1 << 22)
# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
 
#define GEN7_UCGCTL4 0x940c
#define GEN7_L3BANK2X_CLOCK_GATE_DISABLE (1<<25)
 
#define GEN6_RPNSWREQ 0xA008
#define GEN6_TURBO_DISABLE (1<<31)
#define GEN6_FREQUENCY(x) ((x)<<25)
3652,6 → 4166,7
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
#define GEN6_RP_UP_BUSY_AVG (0x2<<3)
#define GEN6_RP_UP_BUSY_CONT (0x4<<3)
#define GEN7_RP_DOWN_IDLE_AVG (0x2<<0)
#define GEN6_RP_DOWN_IDLE_CONT (0x1<<0)
#define GEN6_RP_UP_THRESHOLD 0xA02C
#define GEN6_RP_DOWN_THRESHOLD 0xA030
3695,6 → 4210,11
GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT)
 
#define GEN6_GT_GFX_RC6_LOCKED 0x138104
#define GEN6_GT_GFX_RC6 0x138108
#define GEN6_GT_GFX_RC6p 0x13810C
#define GEN6_GT_GFX_RC6pp 0x138110
 
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
#define GEN6_READ_OC_PARAMS 0xc
3711,6 → 4231,26
#define GEN6_RC6 3
#define GEN6_RC7 4
 
#define GEN7_MISCCPCTL (0x9424)
#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
 
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1 0xB008 /* L3CD Error Status 1 */
#define GEN7_L3CDERRST1_ROW_MASK (0x7ff<<14)
#define GEN7_PARITY_ERROR_VALID (1<<13)
#define GEN7_L3CDERRST1_BANK_MASK (3<<11)
#define GEN7_L3CDERRST1_SUBBANK_MASK (7<<8)
#define GEN7_PARITY_ERROR_ROW(reg) \
((reg & GEN7_L3CDERRST1_ROW_MASK) >> 14)
#define GEN7_PARITY_ERROR_BANK(reg) \
((reg & GEN7_L3CDERRST1_BANK_MASK) >> 11)
#define GEN7_PARITY_ERROR_SUBBANK(reg) \
((reg & GEN7_L3CDERRST1_SUBBANK_MASK) >> 8)
#define GEN7_L3CDERRST1_ENABLE (1<<7)
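/* A minimal decode sketch using the field helpers above (hypothetical, not
 * from the original source; I915_READ is the driver's usual MMIO accessor):
 *
 *	u32 err = I915_READ(GEN7_L3CDERRST1);
 *	if (err & GEN7_PARITY_ERROR_VALID)
 *		DRM_DEBUG("L3 parity error: row %d, bank %d, subbank %d\n",
 *			  GEN7_PARITY_ERROR_ROW(err),
 *			  GEN7_PARITY_ERROR_BANK(err),
 *			  GEN7_PARITY_ERROR_SUBBANK(err));
 */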
 
#define GEN7_L3LOG_BASE 0xB070
#define GEN7_L3LOG_SIZE 0x80
 
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
3724,7 → 4264,15
#define G4X_HDMIW_HDMIEDID 0x6210C
 
#define IBX_HDMIW_HDMIEDID_A 0xE2050
#define IBX_HDMIW_HDMIEDID_B 0xE2150
#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
IBX_HDMIW_HDMIEDID_A, \
IBX_HDMIW_HDMIEDID_B)
#define IBX_AUD_CNTL_ST_A 0xE20B4
#define IBX_AUD_CNTL_ST_B 0xE21B4
#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
IBX_AUD_CNTL_ST_A, \
IBX_AUD_CNTL_ST_B)
#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
#define IBX_ELD_ADDRESS (0x1f << 5)
#define IBX_ELD_ACK (1 << 4)
3733,7 → 4281,15
#define IBX_CP_READYB (1 << 1)
 
#define CPT_HDMIW_HDMIEDID_A 0xE5050
#define CPT_HDMIW_HDMIEDID_B 0xE5150
#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
CPT_HDMIW_HDMIEDID_A, \
CPT_HDMIW_HDMIEDID_B)
#define CPT_AUD_CNTL_ST_A 0xE50B4
#define CPT_AUD_CNTL_ST_B 0xE51B4
#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
CPT_AUD_CNTL_ST_A, \
CPT_AUD_CNTL_ST_B)
#define CPT_AUD_CNTRL_ST2 0xE50C0
 
/* These are the 4 32-bit write offset registers for each stream
3742,4 → 4298,260
*/
#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
 
#define IBX_AUD_CONFIG_A 0xe2000
#define IBX_AUD_CONFIG_B 0xe2100
#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
IBX_AUD_CONFIG_A, \
IBX_AUD_CONFIG_B)
#define CPT_AUD_CONFIG_A 0xe5000
#define CPT_AUD_CONFIG_B 0xe5100
#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
CPT_AUD_CONFIG_A, \
CPT_AUD_CONFIG_B)
#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
#define AUD_CONFIG_UPPER_N_SHIFT 20
#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20)
#define AUD_CONFIG_LOWER_N_SHIFT 4
#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
#define AUD_CONFIG_PIXEL_CLOCK_HDMI (0xf << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
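/* Packing sketch for a programmed N value (hypothetical, not from the
 * original source): the N divider is split across the two fields above,
 * bits 19:12 in the UPPER_N field and bits 11:0 in the LOWER_N field, and
 * only takes effect with N_PROG_ENABLE set:
 *
 *	u32 tmp = I915_READ(CPT_AUD_CFG(pipe));
 *	tmp &= ~(AUD_CONFIG_UPPER_N_VALUE | AUD_CONFIG_LOWER_N_VALUE);
 *	tmp |= ((n >> 12) << AUD_CONFIG_UPPER_N_SHIFT) & AUD_CONFIG_UPPER_N_VALUE;
 *	tmp |= ((n & 0xfff) << AUD_CONFIG_LOWER_N_SHIFT) & AUD_CONFIG_LOWER_N_VALUE;
 *	tmp |= AUD_CONFIG_N_PROG_ENABLE;
 *	I915_WRITE(CPT_AUD_CFG(pipe), tmp);
 */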
 
/* HSW Audio */
#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */
#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */
#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
HSW_AUD_CONFIG_A, \
HSW_AUD_CONFIG_B)
 
#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */
#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */
#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
HSW_AUD_MISC_CTRL_A, \
HSW_AUD_MISC_CTRL_B)
 
#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */
#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */
#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
HSW_AUD_DIP_ELD_CTRL_ST_A, \
HSW_AUD_DIP_ELD_CTRL_ST_B)
 
/* Audio Digital Converter */
#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */
#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 2 */
#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
HSW_AUD_DIG_CNVT_1, \
HSW_AUD_DIG_CNVT_2)
#define DIP_PORT_SEL_MASK 0x3
 
#define HSW_AUD_EDID_DATA_A 0x65050
#define HSW_AUD_EDID_DATA_B 0x65150
#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
HSW_AUD_EDID_DATA_A, \
HSW_AUD_EDID_DATA_B)
 
#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */
#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */
#define AUDIO_INACTIVE_C (1<<11)
#define AUDIO_INACTIVE_B (1<<7)
#define AUDIO_INACTIVE_A (1<<3)
#define AUDIO_OUTPUT_ENABLE_A (1<<2)
#define AUDIO_OUTPUT_ENABLE_B (1<<6)
#define AUDIO_OUTPUT_ENABLE_C (1<<10)
#define AUDIO_ELD_VALID_A (1<<0)
#define AUDIO_ELD_VALID_B (1<<4)
#define AUDIO_ELD_VALID_C (1<<8)
#define AUDIO_CP_READY_A (1<<1)
#define AUDIO_CP_READY_B (1<<5)
#define AUDIO_CP_READY_C (1<<9)
 
/* HSW Power Wells */
#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
#define HSW_PWR_WELL_ENABLE (1<<31)
#define HSW_PWR_WELL_STATE (1<<30)
#define HSW_PWR_WELL_CTL5 0x45410
#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
#define HSW_PWR_WELL_FORCE_ON (1<<19)
#define HSW_PWR_WELL_CTL6 0x45414
 
/* Per-pipe DDI Function Control */
#define PIPE_DDI_FUNC_CTL_A 0x60400
#define PIPE_DDI_FUNC_CTL_B 0x61400
#define PIPE_DDI_FUNC_CTL_C 0x62400
#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
PIPE_DDI_FUNC_CTL_B)
#define PIPE_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define PIPE_DDI_PORT_MASK (7<<28)
#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
#define PIPE_DDI_BPC_MASK (7<<20)
#define PIPE_DDI_BPC_8 (0<<20)
#define PIPE_DDI_BPC_10 (1<<20)
#define PIPE_DDI_BPC_6 (2<<20)
#define PIPE_DDI_BPC_12 (3<<20)
#define PIPE_DDI_PVSYNC (1<<17)
#define PIPE_DDI_PHSYNC (1<<16)
#define PIPE_DDI_BFI_ENABLE (1<<4)
#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
 
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
#define DP_TP_CTL_B 0x64140
#define DP_TP_CTL(port) _PORT(port, DP_TP_CTL_A, DP_TP_CTL_B)
#define DP_TP_CTL_ENABLE (1<<31)
#define DP_TP_CTL_MODE_SST (0<<27)
#define DP_TP_CTL_MODE_MST (1<<27)
#define DP_TP_CTL_ENHANCED_FRAME_ENABLE (1<<18)
#define DP_TP_CTL_FDI_AUTOTRAIN (1<<15)
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
 
/* DisplayPort Transport Status */
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
 
/* DDI Buffer Control */
#define DDI_BUF_CTL_A 0x64000
#define DDI_BUF_CTL_B 0x64100
#define DDI_BUF_CTL(port) _PORT(port, DDI_BUF_CTL_A, DDI_BUF_CTL_B)
#define DDI_BUF_CTL_ENABLE (1<<31)
#define DDI_BUF_EMP_400MV_0DB_HSW (0<<24) /* Sel0 */
#define DDI_BUF_EMP_400MV_3_5DB_HSW (1<<24) /* Sel1 */
#define DDI_BUF_EMP_400MV_6DB_HSW (2<<24) /* Sel2 */
#define DDI_BUF_EMP_400MV_9_5DB_HSW (3<<24) /* Sel3 */
#define DDI_BUF_EMP_600MV_0DB_HSW (4<<24) /* Sel4 */
#define DDI_BUF_EMP_600MV_3_5DB_HSW (5<<24) /* Sel5 */
#define DDI_BUF_EMP_600MV_6DB_HSW (6<<24) /* Sel6 */
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_PORT_WIDTH_X1 (0<<1)
#define DDI_PORT_WIDTH_X2 (1<<1)
#define DDI_PORT_WIDTH_X4 (3<<1)
#define DDI_INIT_DISPLAY_DETECTED (1<<0)
 
/* DDI Buffer Translations */
#define DDI_BUF_TRANS_A 0x64E00
#define DDI_BUF_TRANS_B 0x64E60
#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B)
 
/* Sideband Interface (SBI) is programmed indirectly, via
* SBI_ADDR, which contains the register offset; and SBI_DATA,
* which contains the payload */
#define SBI_ADDR 0xC6000
#define SBI_DATA 0xC6004
#define SBI_CTL_STAT 0xC6008
#define SBI_CTL_OP_CRRD (0x6<<8)
#define SBI_CTL_OP_CRWR (0x7<<8)
#define SBI_RESPONSE_FAIL (0x1<<1)
#define SBI_RESPONSE_SUCCESS (0x0<<1)
#define SBI_BUSY (0x1<<0)
#define SBI_READY (0x0<<0)
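/* Sketch of the indirect read sequence these bits imply (hypothetical
 * helper, not from the original source; wait_for() is the polling macro
 * used elsewhere in this driver, and the register offset goes in the high
 * word of SBI_ADDR):
 *
 *	u32 sbi_read_sketch(struct drm_i915_private *dev_priv, u16 reg)
 *	{
 *		if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100))
 *			return 0;	// controller stuck busy
 *		I915_WRITE(SBI_ADDR, (u32)reg << 16);
 *		I915_WRITE(SBI_CTL_STAT, SBI_CTL_OP_CRRD | SBI_BUSY);
 *		if (wait_for((I915_READ(SBI_CTL_STAT) &
 *			      (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 100))
 *			return 0;	// read failed or timed out
 *		return I915_READ(SBI_DATA);
 *	}
 */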
 
/* SBI offsets */
#define SBI_SSCDIVINTPHASE6 0x0600
#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1)
#define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1)
#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8)
#define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8)
#define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15)
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
#define SBI_SSCCTL_DISABLE (1<<0)
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
#define SBI_DBUFF0 0x2a00
 
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE 0xC6020
#define PIXCLK_GATE_UNGATE (1<<0)
#define PIXCLK_GATE_GATE (0<<0)
 
/* SPLL */
#define SPLL_CTL 0x46020
#define SPLL_PLL_ENABLE (1<<31)
#define SPLL_PLL_SCC (1<<28)
#define SPLL_PLL_NON_SCC (2<<28)
#define SPLL_PLL_FREQ_810MHz (0<<26)
#define SPLL_PLL_FREQ_1350MHz (1<<26)
 
/* WRPLL */
#define WRPLL_CTL1 0x46040
#define WRPLL_CTL2 0x46060
#define WRPLL_PLL_ENABLE (1<<31)
#define WRPLL_PLL_SELECT_SSC (0x01<<28)
#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
#define WRPLL_DIVIDER_POST(x) ((x)<<8)
#define WRPLL_DIVIDER_FEEDBACK(x) ((x)<<16)
 
/* Port clock selection */
#define PORT_CLK_SEL_A 0x46100
#define PORT_CLK_SEL_B 0x46104
#define PORT_CLK_SEL(port) _PORT(port, PORT_CLK_SEL_A, PORT_CLK_SEL_B)
#define PORT_CLK_SEL_LCPLL_2700 (0<<29)
#define PORT_CLK_SEL_LCPLL_1350 (1<<29)
#define PORT_CLK_SEL_LCPLL_810 (2<<29)
#define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
 
/* Pipe clock selection */
#define PIPE_CLK_SEL_A 0x46140
#define PIPE_CLK_SEL_B 0x46144
#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
/* For each pipe, we need to select the corresponding port clock */
#define PIPE_CLK_SEL_DISABLED (0x0<<29)
#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
 
/* LCPLL Control */
#define LCPLL_CTL 0x130040
#define LCPLL_PLL_DISABLE (1<<31)
#define LCPLL_PLL_LOCK (1<<30)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
 
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
#define PIPE_WM_LINETIME_B 0x45274
#define PIPE_WM_LINETIME(pipe) _PIPE(pipe, PIPE_WM_LINETIME_A, \
PIPE_WM_LINETIME_B)
#define PIPE_WM_LINETIME_MASK (0x1ff)
#define PIPE_WM_LINETIME_TIME(x) ((x))
#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff<<16)
#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x)<<16)
 
/* SFUSE_STRAP */
#define SFUSE_STRAP 0xc2014
#define SFUSE_STRAP_DDIB_DETECTED (1<<2)
#define SFUSE_STRAP_DDIC_DETECTED (1<<1)
#define SFUSE_STRAP_DDID_DETECTED (1<<0)
 
#define WM_DBG 0x45280
#define WM_DBG_DISALLOW_MULTIPLE_LP (1<<0)
#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
#define WM_DBG_DISALLOW_SPRITE (1<<2)
 
#endif /* _I915_REG_H_ */
/drivers/video/drm/i915/i915_trace.h
5,7 → 5,7
#include <linux/types.h>
//#include <linux/tracepoint.h>
 
//#define WARN_ON(x)
 
#define trace_i915_gem_object_create(x)
#define trace_i915_gem_object_destroy(x)
19,5 → 19,6
#define trace_i915_gem_request_wait_begin(a, b)
#define trace_i915_gem_request_wait_end(a, b)
#define trace_i915_gem_request_complete(a, b)
#define trace_intel_gpu_freq_change(a)
 
#endif
/drivers/video/drm/i915/intel_bios.c
25,9 → 25,8
*
*/
#include <drm/drm_dp_helper.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_bios.h"
 
173,6 → 172,28
return (struct lvds_dvo_timing *)(entry + dvo_timing_offset);
}
 
/* get lvds_fp_timing entry
* this function may return NULL if the corresponding entry is invalid
*/
static const struct lvds_fp_timing *
get_lvds_fp_timing(const struct bdb_header *bdb,
const struct bdb_lvds_lfp_data *data,
const struct bdb_lvds_lfp_data_ptrs *ptrs,
int index)
{
size_t data_ofs = (const u8 *)data - (const u8 *)bdb;
u16 data_size = ((const u16 *)data)[-1]; /* stored in header */
size_t ofs;
 
if (index >= ARRAY_SIZE(ptrs->ptr))
return NULL;
ofs = ptrs->ptr[index].fp_timing_offset;
if (ofs < data_ofs ||
ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size)
return NULL;
return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs);
}
 
/* Try to find integrated panel data */
static void
parse_lfp_panel_data(struct drm_i915_private *dev_priv,
182,6 → 203,7
const struct bdb_lvds_lfp_data *lvds_lfp_data;
const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
const struct lvds_dvo_timing *panel_dvo_timing;
const struct lvds_fp_timing *fp_timing;
struct drm_display_mode *panel_fixed_mode;
int i, downclock;
 
243,7 → 265,20
"Normal Clock %dKHz, downclock %dKHz\n",
panel_fixed_mode->clock, 10*downclock);
}
 
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
lvds_lfp_data_ptrs,
lvds_options->panel_type);
if (fp_timing) {
/* check the resolution, just to be sure */
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
fp_timing->y_res == panel_fixed_mode->vdisplay) {
dev_priv->bios_lvds_val = fp_timing->lvds_reg_val;
DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
dev_priv->bios_lvds_val);
}
}
}
 
/* Try to find sdvo panel data */
static void
255,6 → 290,11
int index;
 
index = i915_vbt_sdvo_panel_type;
if (index == -2) {
DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
return;
}
 
if (index == -1) {
struct bdb_sdvo_lvds_options *sdvo_lvds_options;
 
331,7 → 371,7
if (block_size >= sizeof(*general)) {
int bus_pin = general->crt_ddc_gmbus_pin;
DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
if (bus_pin >= 1 && bus_pin <= 6)
if (intel_gmbus_is_port_valid(bus_pin))
dev_priv->crt_ddc_pin = bus_pin;
} else {
DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
572,7 → 612,7
DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
return;
}
dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
dev_priv->child_dev = kcalloc(count, sizeof(*p_child), GFP_KERNEL);
if (!dev_priv->child_dev) {
DRM_DEBUG_KMS("No memory space for child device\n");
return;
630,7 → 670,7
*
* Returns 0 on success, nonzero on failure.
*/
bool
int
intel_parse_bios(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
669,7 → 709,7
}
 
if (!vbt) {
DRM_ERROR("VBT signature missing\n");
DRM_DEBUG_DRIVER("VBT signature missing\n");
pci_unmap_rom(pdev, bios);
return -1;
}
/drivers/video/drm/i915/intel_bios.h
28,7 → 28,7
#ifndef _I830_BIOS_H_
#define _I830_BIOS_H_
 
#include "drmP.h"
#include <drm/drmP.h>
 
struct vbt_header {
u8 signature[20]; /**< Always starts with 'VBT$' */
476,7 → 476,7
} __attribute__ ((packed));
 
void intel_setup_bios(struct drm_device *dev);
bool intel_parse_bios(struct drm_device *dev);
int intel_parse_bios(struct drm_device *dev);
 
/*
* Driver<->VBIOS interaction occurs through scratch bits in
/drivers/video/drm/i915/intel_crt.c
26,13 → 26,12
 
#include <linux/i2c.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_edid.h"
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
/* Here's the desired hotplug mode */
46,6 → 45,7
struct intel_crt {
struct intel_encoder base;
bool force_hotplug_required;
u32 adpa_reg;
};
 
static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
54,21 → 54,68
struct intel_crt, base);
}
 
static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
return container_of(encoder, struct intel_crt, base);
}
 
static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp, reg;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 tmp;
 
if (HAS_PCH_SPLIT(dev))
reg = PCH_ADPA;
tmp = I915_READ(crt->adpa_reg);
 
if (!(tmp & ADPA_DAC_ENABLE))
return false;
 
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
reg = ADPA;
*pipe = PORT_TO_PIPE(tmp);
 
temp = I915_READ(reg);
return true;
}
 
static void intel_disable_crt(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 temp;
 
temp = I915_READ(crt->adpa_reg);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
I915_WRITE(crt->adpa_reg, temp);
}
 
static void intel_enable_crt(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 temp;
 
temp = I915_READ(crt->adpa_reg);
temp |= ADPA_DAC_ENABLE;
I915_WRITE(crt->adpa_reg, temp);
}
 
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 temp;
 
temp = I915_READ(crt->adpa_reg);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
 
switch (mode) {
case DRM_MODE_DPMS_ON:
temp |= ADPA_DAC_ENABLE;
84,9 → 131,53
break;
}
 
I915_WRITE(reg, temp);
I915_WRITE(crt->adpa_reg, temp);
}
 
static void intel_crt_dpms(struct drm_connector *connector, int mode)
{
struct drm_device *dev = connector->dev;
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
int old_dpms;
 
/* PCH platforms and VLV only support on/off. */
if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
 
if (mode == connector->dpms)
return;
 
old_dpms = connector->dpms;
connector->dpms = mode;
 
/* Only need to change hw state when actually enabled */
crtc = encoder->base.crtc;
if (!crtc) {
encoder->connectors_active = false;
return;
}
 
/* We need the pipe to run for anything but OFF. */
if (mode == DRM_MODE_DPMS_OFF)
encoder->connectors_active = false;
else
encoder->connectors_active = true;
 
if (mode < old_dpms) {
/* From off to on, enable the pipe first. */
intel_crtc_update_dpms(crtc);
 
intel_crt_set_dpms(encoder, mode);
} else {
intel_crt_set_dpms(encoder, mode);
 
intel_crtc_update_dpms(crtc);
}
 
intel_modeset_check_state(connector->dev);
}
 
static int intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
110,7 → 201,7
}
 
static bool intel_crt_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
123,29 → 214,12
 
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crt *crt =
intel_encoder_to_crt(to_intel_encoder(encoder));
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_md_reg;
u32 adpa, dpll_md;
u32 adpa_reg;
u32 adpa;
 
dpll_md_reg = DPLL_MD(intel_crtc->pipe);
 
if (HAS_PCH_SPLIT(dev))
adpa_reg = PCH_ADPA;
else
adpa_reg = ADPA;
 
/*
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
*/
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
dpll_md = I915_READ(dpll_md_reg);
I915_WRITE(dpll_md_reg,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
}
 
adpa = ADPA_HOTPLUG_BITS;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
163,7 → 237,7
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
 
I915_WRITE(adpa_reg, adpa);
I915_WRITE(crt->adpa_reg, adpa);
}
 
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
211,6 → 285,42
return ret;
}
 
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
bool ret;
u32 save_adpa;
 
save_adpa = adpa = I915_READ(ADPA);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
 
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
 
I915_WRITE(ADPA, adpa);
 
if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
I915_WRITE(ADPA, save_adpa);
}
 
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(ADPA);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
 
DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
 
/* FIXME: debug force function and remove */
ret = true;
 
return ret;
}
 
/**
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence.
*
230,6 → 340,9
if (HAS_PCH_SPLIT(dev))
return intel_ironlake_crt_detect_hotplug(connector);
 
if (IS_VALLEYVIEW(dev))
return valleyview_crt_detect_hotplug(connector);
 
/*
* On 4 series desktop, the CRT detect sequence needs to be done twice
* to get a reliable result.
265,42 → 378,68
return ret;
}
 
static struct edid *intel_crt_get_edid(struct drm_connector *connector,
struct i2c_adapter *i2c)
{
struct edid *edid;
 
edid = drm_get_edid(connector, i2c);
 
if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(i2c, true);
edid = drm_get_edid(connector, i2c);
intel_gmbus_force_bit(i2c, false);
}
 
return edid;
}
 
/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */
static int intel_crt_ddc_get_modes(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
struct edid *edid;
 
edid = intel_crt_get_edid(connector, adapter);
if (!edid)
return 0;
 
return intel_connector_update_modes(connector, edid);
}
 
static bool intel_crt_detect_ddc(struct drm_connector *connector)
{
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
struct edid *edid;
struct i2c_adapter *i2c;
 
/* CRT should always be at 0, but check anyway */
if (crt->base.type != INTEL_OUTPUT_ANALOG)
return false;
BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
struct edid *edid;
bool is_digital = false;
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
edid = intel_crt_get_edid(connector, i2c);
 
edid = drm_get_edid(connector,
&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
if (edid) {
bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
 
/*
* This may be a DVI-I connector with a shared DDC
* link between analog and digital outputs, so we
* have to check the EDID input spec of the attached device.
*
* On the other hand, what should we do if it is a broken EDID?
*/
if (edid != NULL) {
is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
connector->display_info.raw_edid = NULL;
kfree(edid);
}
 
if (!is_digital) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
return true;
}
 
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
} else {
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
}
}
 
kfree(edid);
 
return false;
}
 
429,43 → 568,43
{
struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_crtc *crtc;
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
 
if (I915_HAS_HOTPLUG(dev)) {
/* We cannot rely on the HPD pin always being correctly wired
* up, for example many KVMs do not pass it through, and so
* only trust an assertion that the monitor is connected.
*/
if (intel_crt_detect_hotplug(connector)) {
DRM_DEBUG_KMS("CRT detected via hotplug\n");
return connector_status_connected;
} else {
} else
DRM_DEBUG_KMS("CRT not detected via hotplug\n");
return connector_status_disconnected;
}
}
 
if (intel_crt_detect_ddc(connector))
return connector_status_connected;
 
/* Load detection is broken on HPD capable machines. Whoever wants a
* broken monitor (without edid) to work behind a broken kvm (that fails
* to have the right resistors for HP detection) needs to fix this up.
* For now just bail out. */
if (I915_HAS_HOTPLUG(dev))
return connector_status_disconnected;
 
if (!force)
return connector->status;
 
/* for pre-945g platforms use load detect */
crtc = crt->base.base.crtc;
if (crtc && crtc->enabled) {
status = intel_crt_load_detect(crt);
} else {
struct intel_load_detect_pipe tmp;
 
if (intel_get_load_detect_pipe(&crt->base, connector, NULL,
&tmp)) {
if (intel_get_load_detect_pipe(connector, NULL, &tmp)) {
if (intel_crt_detect_ddc(connector))
status = connector_status_connected;
else
status = intel_crt_load_detect(crt);
intel_release_load_detect_pipe(&crt->base, connector,
&tmp);
intel_release_load_detect_pipe(connector, &tmp);
} else
status = connector_status_unknown;
}
 
return status;
}
482,15 → 621,16
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
struct i2c_adapter *i2c;
 
ret = intel_ddc_get_modes(connector,
&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
if (ret || !IS_G4X(dev))
return ret;
 
/* Try to probe digital port for output in DVI-I -> VGA mode. */
return intel_ddc_get_modes(connector,
&dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
return intel_crt_ddc_get_modes(connector, i2c);
}
 
static int intel_crt_set_property(struct drm_connector *connector,
513,17 → 653,15
* Routines for controlling stuff on the analog port
*/
 
static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
.dpms = intel_crt_dpms,
static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
.prepare = intel_encoder_prepare,
.commit = intel_encoder_commit,
.mode_set = intel_crt_mode_set,
.disable = intel_encoder_noop,
};
 
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
.dpms = drm_helper_connector_dpms,
.dpms = intel_crt_dpms,
.detect = intel_crt_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = intel_crt_destroy,
567,14 → 705,31
intel_connector_attach_encoder(intel_connector, &crt->base);
 
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
1 << INTEL_ANALOG_CLONE_BIT |
1 << INTEL_SDVO_LVDS_CLONE_BIT);
crt->base.crtc_mask = (1 << 0) | (1 << 1);
crt->base.cloneable = true;
if (IS_HASWELL(dev) || IS_I830(dev))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
if (IS_GEN2(dev))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
 
drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
if (HAS_PCH_SPLIT(dev))
crt->adpa_reg = PCH_ADPA;
else if (IS_VALLEYVIEW(dev))
crt->adpa_reg = VLV_ADPA;
else
crt->adpa_reg = ADPA;
 
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
crt->base.get_hw_state = intel_crt_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
 
drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
drm_sysfs_connector_add(connector);
/drivers/video/drm/i915/intel_ddi.c
0,0 → 1,819
/*
* Copyright © 2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eugeni Dodonov <eugeni.dodonov@intel.com>
*
*/
 
#include "i915_drv.h"
#include "intel_drv.h"
 
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
* automatically adapt to HDMI connections as well
*/
static const u32 hsw_ddi_translations_dp[] = {
0x00FFFFFF, 0x0006000E, /* DP parameters */
0x00D75FFF, 0x0005000A,
0x00C30FFF, 0x00040006,
0x80AAAFFF, 0x000B0000,
0x00FFFFFF, 0x0005000A,
0x00D75FFF, 0x000C0004,
0x80C30FFF, 0x000B0000,
0x00FFFFFF, 0x00040006,
0x80D75FFF, 0x000B0000,
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
 
static const u32 hsw_ddi_translations_fdi[] = {
0x00FFFFFF, 0x0007000E, /* FDI parameters */
0x00D75FFF, 0x000F000A,
0x00C30FFF, 0x00060006,
0x00AAAFFF, 0x001E0000,
0x00FFFFFF, 0x000F000A,
0x00D75FFF, 0x00160004,
0x00C30FFF, 0x001E0000,
0x00FFFFFF, 0x00060006,
0x00D75FFF, 0x001E0000,
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
 
/* On Haswell, DDI port buffers must be programmed with correct values
* in advance. The buffer values are different for FDI and DP modes,
* but the HDMI/DVI fields are shared among those. So we program the DDI
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
int i;
const u32 *ddi_translations = ((use_fdi_mode) ?
hsw_ddi_translations_fdi :
hsw_ddi_translations_dp);
 
DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
port_name(port),
use_fdi_mode ? "FDI" : "DP");
 
WARN((use_fdi_mode && (port != PORT_E)),
"Programming port %c in FDI mode, this probably will not work.\n",
port_name(port));
 
for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i]);
reg += 4;
}
}
 
/* Program DDI buffers translations for DP. By default, program ports A-D in DP
* mode and port E for FDI.
*/
void intel_prepare_ddi(struct drm_device *dev)
{
int port;
 
if (IS_HASWELL(dev)) {
for (port = PORT_A; port < PORT_E; port++)
intel_prepare_ddi_buffers(dev, port, false);
 
/* DDI E is the suggested one to work in FDI mode, so program it as such by
* default. It will have to be re-programmed if a digital DP output
* is detected on it
*/
intel_prepare_ddi_buffers(dev, PORT_E, true);
}
}
 
static const long hsw_ddi_buf_ctl_values[] = {
DDI_BUF_EMP_400MV_0DB_HSW,
DDI_BUF_EMP_400MV_3_5DB_HSW,
DDI_BUF_EMP_400MV_6DB_HSW,
DDI_BUF_EMP_400MV_9_5DB_HSW,
DDI_BUF_EMP_600MV_0DB_HSW,
DDI_BUF_EMP_600MV_3_5DB_HSW,
DDI_BUF_EMP_600MV_6DB_HSW,
DDI_BUF_EMP_800MV_0DB_HSW,
DDI_BUF_EMP_800MV_3_5DB_HSW
};
 
 
/* Starting with Haswell, different DDI ports can work in FDI mode for
* connection to the PCH-located connectors. For this, it is necessary to train
* both the DDI port and PCH receiver for the desired DDI buffer settings.
*
* The recommended port to work in FDI mode is DDI E, which we use here. Also,
* please note that when FDI mode is active on DDI E, it shares 2 lines with
* DDI A (which is used for eDP)
*/
 
void hsw_fdi_link_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, i;
 
/* Configure CPU PLL, wait for warmup */
I915_WRITE(SPLL_CTL,
SPLL_PLL_ENABLE |
SPLL_PLL_FREQ_1350MHz |
SPLL_PLL_SCC);
 
/* Use SPLL to drive the output when in FDI mode */
I915_WRITE(PORT_CLK_SEL(PORT_E),
PORT_CLK_SEL_SPLL);
I915_WRITE(PIPE_CLK_SEL(pipe),
PIPE_CLK_SEL_PORT(PORT_E));
 
udelay(20);
 
/* Start the training iterating through available voltages and emphasis */
for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
/* Configure DP_TP_CTL with auto-training */
I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
DP_TP_CTL_LINK_TRAIN_PAT1 |
DP_TP_CTL_ENABLE);
 
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
temp = I915_READ(DDI_BUF_CTL(PORT_E));
temp = (temp & ~DDI_BUF_EMP_MASK);
I915_WRITE(DDI_BUF_CTL(PORT_E),
temp |
DDI_BUF_CTL_ENABLE |
DDI_PORT_WIDTH_X2 |
hsw_ddi_buf_ctl_values[i]);
 
udelay(600);
 
/* We need to program FDI_RX_MISC with the default TP1 to TP2
* values before enabling the receiver, and configure the delay
* for the FDI timing generator to 90h. Luckily, all the other
* bits are supposed to be zeroed, so we can write those values
* directly.
*/
I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 |
FDI_RX_FDI_DELAY_90);
 
/* Enable CPU FDI Receiver with auto-training */
reg = FDI_RX_CTL(pipe);
I915_WRITE(reg,
I915_READ(reg) |
FDI_LINK_TRAIN_AUTO |
FDI_RX_ENABLE |
FDI_LINK_TRAIN_PATTERN_1_CPT |
FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_PORT_WIDTH_2X_LPT |
FDI_RX_PLL_ENABLE);
POSTING_READ(reg);
udelay(100);
 
temp = I915_READ(DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
 
/* Enable normal pixel sending for FDI */
I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
DP_TP_CTL_LINK_TRAIN_NORMAL |
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
DP_TP_CTL_ENABLE);
 
/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
temp = I915_READ(DDI_FUNC_CTL(pipe));
temp &= ~PIPE_DDI_PORT_MASK;
temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
PIPE_DDI_MODE_SELECT_FDI |
PIPE_DDI_FUNC_ENABLE |
PIPE_DDI_PORT_WIDTH_X2;
I915_WRITE(DDI_FUNC_CTL(pipe),
temp);
break;
} else {
DRM_ERROR("Error training BUF_CTL %d\n", i);
 
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
I915_WRITE(DP_TP_CTL(PORT_E),
I915_READ(DP_TP_CTL(PORT_E)) &
~DP_TP_CTL_ENABLE);
I915_WRITE(FDI_RX_CTL(pipe),
I915_READ(FDI_RX_CTL(pipe)) &
~FDI_RX_PLL_ENABLE);
continue;
}
}
 
DRM_DEBUG_KMS("FDI train done.\n");
}
 
/* For DDI connections, it is possible to support different outputs over the
* same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
* the time the output is detected what exactly is on the other end of it. This
* function aims at providing support for this detection and proper output
* configuration.
*/
void intel_ddi_init(struct drm_device *dev, enum port port)
{
/* For now, we don't do any proper output detection and assume that we
* handle HDMI only */
 
switch(port){
case PORT_A:
/* We don't handle eDP and DP yet */
DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
break;
/* Assume that the ports B, C and D are working in HDMI mode for now */
case PORT_B:
case PORT_C:
case PORT_D:
intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
break;
default:
DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
port);
break;
}
}
 
/* WRPLL clock dividers */
struct wrpll_tmds_clock {
u32 clock;
u16 p; /* Post divider */
u16 n2; /* Feedback divider */
u16 r2; /* Reference divider */
};
 
/* Table of matching values for WRPLL clocks programming for each frequency.
* The code assumes this table is sorted. */
static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
{19750, 38, 25, 18},
{20000, 48, 32, 18},
{21000, 36, 21, 15},
{21912, 42, 29, 17},
{22000, 36, 22, 15},
{23000, 36, 23, 15},
{23500, 40, 40, 23},
{23750, 26, 16, 14},
{24000, 36, 24, 15},
{25000, 36, 25, 15},
{25175, 26, 40, 33},
{25200, 30, 21, 15},
{26000, 36, 26, 15},
{27000, 30, 21, 14},
{27027, 18, 100, 111},
{27500, 30, 29, 19},
{28000, 34, 30, 17},
{28320, 26, 30, 22},
{28322, 32, 42, 25},
{28750, 24, 23, 18},
{29000, 30, 29, 18},
{29750, 32, 30, 17},
{30000, 30, 25, 15},
{30750, 30, 41, 24},
{31000, 30, 31, 18},
{31500, 30, 28, 16},
{32000, 30, 32, 18},
{32500, 28, 32, 19},
{33000, 24, 22, 15},
{34000, 28, 30, 17},
{35000, 26, 32, 19},
{35500, 24, 30, 19},
{36000, 26, 26, 15},
{36750, 26, 46, 26},
{37000, 24, 23, 14},
{37762, 22, 40, 26},
{37800, 20, 21, 15},
{38000, 24, 27, 16},
{38250, 24, 34, 20},
{39000, 24, 26, 15},
{40000, 24, 32, 18},
{40500, 20, 21, 14},
{40541, 22, 147, 89},
{40750, 18, 19, 14},
{41000, 16, 17, 14},
{41500, 22, 44, 26},
{41540, 22, 44, 26},
{42000, 18, 21, 15},
{42500, 22, 45, 26},
{43000, 20, 43, 27},
{43163, 20, 24, 15},
{44000, 18, 22, 15},
{44900, 20, 108, 65},
{45000, 20, 25, 15},
{45250, 20, 52, 31},
{46000, 18, 23, 15},
{46750, 20, 45, 26},
{47000, 20, 40, 23},
{48000, 18, 24, 15},
{49000, 18, 49, 30},
{49500, 16, 22, 15},
{50000, 18, 25, 15},
{50500, 18, 32, 19},
{51000, 18, 34, 20},
{52000, 18, 26, 15},
{52406, 14, 34, 25},
{53000, 16, 22, 14},
{54000, 16, 24, 15},
{54054, 16, 173, 108},
{54500, 14, 24, 17},
{55000, 12, 22, 18},
{56000, 14, 45, 31},
{56250, 16, 25, 15},
{56750, 14, 25, 17},
{57000, 16, 27, 16},
{58000, 16, 43, 25},
{58250, 16, 38, 22},
{58750, 16, 40, 23},
{59000, 14, 26, 17},
{59341, 14, 40, 26},
{59400, 16, 44, 25},
{60000, 16, 32, 18},
{60500, 12, 39, 29},
{61000, 14, 49, 31},
{62000, 14, 37, 23},
{62250, 14, 42, 26},
{63000, 12, 21, 15},
{63500, 14, 28, 17},
{64000, 12, 27, 19},
{65000, 14, 32, 19},
{65250, 12, 29, 20},
{65500, 12, 32, 22},
{66000, 12, 22, 15},
{66667, 14, 38, 22},
{66750, 10, 21, 17},
{67000, 14, 33, 19},
{67750, 14, 58, 33},
{68000, 14, 30, 17},
{68179, 14, 46, 26},
{68250, 14, 46, 26},
{69000, 12, 23, 15},
{70000, 12, 28, 18},
{71000, 12, 30, 19},
{72000, 12, 24, 15},
{73000, 10, 23, 17},
{74000, 12, 23, 14},
{74176, 8, 100, 91},
{74250, 10, 22, 16},
{74481, 12, 43, 26},
{74500, 10, 29, 21},
{75000, 12, 25, 15},
{75250, 10, 39, 28},
{76000, 12, 27, 16},
{77000, 12, 53, 31},
{78000, 12, 26, 15},
{78750, 12, 28, 16},
{79000, 10, 38, 26},
{79500, 10, 28, 19},
{80000, 12, 32, 18},
{81000, 10, 21, 14},
{81081, 6, 100, 111},
{81624, 8, 29, 24},
{82000, 8, 17, 14},
{83000, 10, 40, 26},
{83950, 10, 28, 18},
{84000, 10, 28, 18},
{84750, 6, 16, 17},
{85000, 6, 17, 18},
{85250, 10, 30, 19},
{85750, 10, 27, 17},
{86000, 10, 43, 27},
{87000, 10, 29, 18},
{88000, 10, 44, 27},
{88500, 10, 41, 25},
{89000, 10, 28, 17},
{89012, 6, 90, 91},
{89100, 10, 33, 20},
{90000, 10, 25, 15},
{91000, 10, 32, 19},
{92000, 10, 46, 27},
{93000, 10, 31, 18},
{94000, 10, 40, 23},
{94500, 10, 28, 16},
{95000, 10, 44, 25},
{95654, 10, 39, 22},
{95750, 10, 39, 22},
{96000, 10, 32, 18},
{97000, 8, 23, 16},
{97750, 8, 42, 29},
{98000, 8, 45, 31},
{99000, 8, 22, 15},
{99750, 8, 34, 23},
{100000, 6, 20, 18},
{100500, 6, 19, 17},
{101000, 6, 37, 33},
{101250, 8, 21, 14},
{102000, 6, 17, 15},
{102250, 6, 25, 22},
{103000, 8, 29, 19},
{104000, 8, 37, 24},
{105000, 8, 28, 18},
{106000, 8, 22, 14},
{107000, 8, 46, 29},
{107214, 8, 27, 17},
{108000, 8, 24, 15},
{108108, 8, 173, 108},
{109000, 6, 23, 19},
{110000, 6, 22, 18},
{110013, 6, 22, 18},
{110250, 8, 49, 30},
{110500, 8, 36, 22},
{111000, 8, 23, 14},
{111264, 8, 150, 91},
{111375, 8, 33, 20},
{112000, 8, 63, 38},
{112500, 8, 25, 15},
{113100, 8, 57, 34},
{113309, 8, 42, 25},
{114000, 8, 27, 16},
{115000, 6, 23, 18},
{116000, 8, 43, 25},
{117000, 8, 26, 15},
{117500, 8, 40, 23},
{118000, 6, 38, 29},
{119000, 8, 30, 17},
{119500, 8, 46, 26},
{119651, 8, 39, 22},
{120000, 8, 32, 18},
{121000, 6, 39, 29},
{121250, 6, 31, 23},
{121750, 6, 23, 17},
{122000, 6, 42, 31},
{122614, 6, 30, 22},
{123000, 6, 41, 30},
{123379, 6, 37, 27},
{124000, 6, 51, 37},
{125000, 6, 25, 18},
{125250, 4, 13, 14},
{125750, 4, 27, 29},
{126000, 6, 21, 15},
{127000, 6, 24, 17},
{127250, 6, 41, 29},
{128000, 6, 27, 19},
{129000, 6, 43, 30},
{129859, 4, 25, 26},
{130000, 6, 26, 18},
{130250, 6, 42, 29},
{131000, 6, 32, 22},
{131500, 6, 38, 26},
{131850, 6, 41, 28},
{132000, 6, 22, 15},
{132750, 6, 28, 19},
{133000, 6, 34, 23},
{133330, 6, 37, 25},
{134000, 6, 61, 41},
{135000, 6, 21, 14},
{135250, 6, 167, 111},
{136000, 6, 62, 41},
{137000, 6, 35, 23},
{138000, 6, 23, 15},
{138500, 6, 40, 26},
{138750, 6, 37, 24},
{139000, 6, 34, 22},
{139050, 6, 34, 22},
{139054, 6, 34, 22},
{140000, 6, 28, 18},
{141000, 6, 36, 23},
{141500, 6, 22, 14},
{142000, 6, 30, 19},
{143000, 6, 27, 17},
{143472, 4, 17, 16},
{144000, 6, 24, 15},
{145000, 6, 29, 18},
{146000, 6, 47, 29},
{146250, 6, 26, 16},
{147000, 6, 49, 30},
{147891, 6, 23, 14},
{148000, 6, 23, 14},
{148250, 6, 28, 17},
{148352, 4, 100, 91},
{148500, 6, 33, 20},
{149000, 6, 48, 29},
{150000, 6, 25, 15},
{151000, 4, 19, 17},
{152000, 6, 27, 16},
{152280, 6, 44, 26},
{153000, 6, 34, 20},
{154000, 6, 53, 31},
{155000, 6, 31, 18},
{155250, 6, 50, 29},
{155750, 6, 45, 26},
{156000, 6, 26, 15},
{157000, 6, 61, 35},
{157500, 6, 28, 16},
{158000, 6, 65, 37},
{158250, 6, 44, 25},
{159000, 6, 53, 30},
{159500, 6, 39, 22},
{160000, 6, 32, 18},
{161000, 4, 31, 26},
{162000, 4, 18, 15},
{162162, 4, 131, 109},
{162500, 4, 53, 44},
{163000, 4, 29, 24},
{164000, 4, 17, 14},
{165000, 4, 22, 18},
{166000, 4, 32, 26},
{167000, 4, 26, 21},
{168000, 4, 46, 37},
{169000, 4, 104, 83},
{169128, 4, 64, 51},
{169500, 4, 39, 31},
{170000, 4, 34, 27},
{171000, 4, 19, 15},
{172000, 4, 51, 40},
{172750, 4, 32, 25},
{172800, 4, 32, 25},
{173000, 4, 41, 32},
{174000, 4, 49, 38},
{174787, 4, 22, 17},
{175000, 4, 35, 27},
{176000, 4, 30, 23},
{177000, 4, 38, 29},
{178000, 4, 29, 22},
{178500, 4, 37, 28},
{179000, 4, 53, 40},
{179500, 4, 73, 55},
{180000, 4, 20, 15},
{181000, 4, 55, 41},
{182000, 4, 31, 23},
{183000, 4, 42, 31},
{184000, 4, 30, 22},
{184750, 4, 26, 19},
{185000, 4, 37, 27},
{186000, 4, 51, 37},
{187000, 4, 36, 26},
{188000, 4, 32, 23},
{189000, 4, 21, 15},
{190000, 4, 38, 27},
{190960, 4, 41, 29},
{191000, 4, 41, 29},
{192000, 4, 27, 19},
{192250, 4, 37, 26},
{193000, 4, 20, 14},
{193250, 4, 53, 37},
{194000, 4, 23, 16},
{194208, 4, 23, 16},
{195000, 4, 26, 18},
{196000, 4, 45, 31},
{197000, 4, 35, 24},
{197750, 4, 41, 28},
{198000, 4, 22, 15},
{198500, 4, 25, 17},
{199000, 4, 28, 19},
{200000, 4, 37, 25},
{201000, 4, 61, 41},
{202000, 4, 112, 75},
{202500, 4, 21, 14},
{203000, 4, 146, 97},
{204000, 4, 62, 41},
{204750, 4, 44, 29},
{205000, 4, 38, 25},
{206000, 4, 29, 19},
{207000, 4, 23, 15},
{207500, 4, 40, 26},
{208000, 4, 37, 24},
{208900, 4, 48, 31},
{209000, 4, 48, 31},
{209250, 4, 31, 20},
{210000, 4, 28, 18},
{211000, 4, 25, 16},
{212000, 4, 22, 14},
{213000, 4, 30, 19},
{213750, 4, 38, 24},
{214000, 4, 46, 29},
{214750, 4, 35, 22},
{215000, 4, 43, 27},
{216000, 4, 24, 15},
{217000, 4, 37, 23},
{218000, 4, 42, 26},
{218250, 4, 42, 26},
{218750, 4, 34, 21},
{219000, 4, 47, 29},
{220000, 4, 44, 27},
{220640, 4, 49, 30},
{220750, 4, 36, 22},
{221000, 4, 36, 22},
{222000, 4, 23, 14},
{222525, 4, 28, 17},
{222750, 4, 33, 20},
{227000, 4, 37, 22},
{230250, 4, 29, 17},
{233500, 4, 38, 22},
{235000, 4, 40, 23},
{238000, 4, 30, 17},
{241500, 2, 17, 19},
{245250, 2, 20, 22},
{247750, 2, 22, 24},
{253250, 2, 15, 16},
{256250, 2, 18, 19},
{262500, 2, 31, 32},
{267250, 2, 66, 67},
{268500, 2, 94, 95},
{270000, 2, 14, 14},
{272500, 2, 77, 76},
{273750, 2, 57, 56},
{280750, 2, 24, 23},
{281250, 2, 23, 22},
{286000, 2, 17, 16},
{291750, 2, 26, 24},
{296703, 2, 56, 51},
{297000, 2, 22, 20},
{298000, 2, 21, 19},
};
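
/* Editor's note - not part of the original source: the {clock, p, n2, r2}
* entries above appear to assume the 2700 MHz LCPLL reference, following
* clock_khz ~= (2700000 * n2) / (5 * p * r2). For example, {148500, 6, 33, 20}
* gives 2700000 * 33 / (5 * 6 * 20) == 148500 exactly, while entries such as
* 74176 kHz (a 1000/1001 NTSC-family rate) carry the closest representable
* divisors rather than exact matches. A hypothetical self-check helper,
* assuming that relation holds:
*/
static inline u32 wrpll_hypothetical_khz(u32 p, u32 n2, u32 r2)
{
/* integer division mirrors the divisor granularity of the PLL */
return (2700000u * n2) / (5u * p * r2);
}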
 
void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int port = intel_hdmi->ddi_port;
int pipe = intel_crtc->pipe;
int p, n2, r2;
u32 temp, i;
 
/* On Haswell, we need to enable the clocks and prepare the DDI function
* to work in HDMI mode for this pipe.
*/
DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
 
for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
break;
 
if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
i--;
 
p = wrpll_tmds_clock_table[i].p;
n2 = wrpll_tmds_clock_table[i].n2;
r2 = wrpll_tmds_clock_table[i].r2;
 
if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
 
DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
crtc->mode.clock, p, n2, r2);
 
/* Enable LCPLL if disabled */
temp = I915_READ(LCPLL_CTL);
if (temp & LCPLL_PLL_DISABLE)
I915_WRITE(LCPLL_CTL,
temp & ~LCPLL_PLL_DISABLE);
 
/* Configure WR PLL 1, program the correct divider values for
* the desired frequency and wait for warmup */
I915_WRITE(WRPLL_CTL1,
WRPLL_PLL_ENABLE |
WRPLL_PLL_SELECT_LCPLL_2700 |
WRPLL_DIVIDER_REFERENCE(r2) |
WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p));
 
udelay(20);
 
/* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
* this port for connection.
*/
I915_WRITE(PORT_CLK_SEL(port),
PORT_CLK_SEL_WRPLL1);
I915_WRITE(PIPE_CLK_SEL(pipe),
PIPE_CLK_SEL_PORT(port));
 
udelay(20);
 
if (intel_hdmi->has_audio) {
/* Proper support for digital audio needs new logic and a new set
* of registers, so we leave it for future patches.
*/
DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
 
/* write eld */
DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
intel_write_eld(encoder, adjusted_mode);
}
 
/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
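/* Editor's note: intel_crtc->bpp below counts total bits per pixel over
* three color channels, so 18/24/30/36 bpp map onto the 6/8/10/12
* bits-per-channel (BPC) pipe settings in the switch that follows.
*/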
 
switch (intel_crtc->bpp) {
case 18:
temp |= PIPE_DDI_BPC_6;
break;
case 24:
temp |= PIPE_DDI_BPC_8;
break;
case 30:
temp |= PIPE_DDI_BPC_10;
break;
case 36:
temp |= PIPE_DDI_BPC_12;
break;
default:
WARN(1, "%d bpp unsupported by pipe DDI function\n",
intel_crtc->bpp);
}
 
if (intel_hdmi->has_hdmi_sink)
temp |= PIPE_DDI_MODE_SELECT_HDMI;
else
temp |= PIPE_DDI_MODE_SELECT_DVI;
 
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
temp |= PIPE_DDI_PVSYNC;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
temp |= PIPE_DDI_PHSYNC;
 
I915_WRITE(DDI_FUNC_CTL(pipe), temp);
 
intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
 
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 tmp;
int i;
 
tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
 
if (!(tmp & DDI_BUF_CTL_ENABLE))
return false;
 
for_each_pipe(i) {
tmp = I915_READ(DDI_FUNC_CTL(i));
 
if ((tmp & PIPE_DDI_PORT_MASK)
== PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
*pipe = i;
return true;
}
}
 
DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
 
/* The DDI buffer is on but no pipe drives it; *pipe is not set,
* so report the encoder as disabled. */
return false;
}
 
void intel_enable_ddi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
int port = intel_hdmi->ddi_port;
u32 temp;
 
temp = I915_READ(DDI_BUF_CTL(port));
temp |= DDI_BUF_CTL_ENABLE;
 
/* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
* and swing/emphasis values are ignored so nothing special needs
* to be done besides enabling the port.
*/
I915_WRITE(DDI_BUF_CTL(port), temp);
}
 
void intel_disable_ddi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
int port = intel_hdmi->ddi_port;
u32 temp;
 
temp = I915_READ(DDI_BUF_CTL(port));
temp &= ~DDI_BUF_CTL_ENABLE;
 
I915_WRITE(DDI_BUF_CTL(port), temp);
}
/drivers/video/drm/i915/intel_display.c
32,13 → 32,14
#include <linux/slab.h>
//#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include "drmP.h"
#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
//#include <linux/dma_remapping.h>
 
phys_addr_t get_bus_addr(void);
 
63,20 → 64,11
}
 
 
static inline int pci_read_config_word(struct pci_dev *dev, int where,
u16 *val)
{
*val = PciRead16(dev->busnr, dev->devfn, where);
return 1;
}
 
 
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
 
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
//static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
typedef struct {
/* given values */
105,7 → 97,7
intel_range_t dot, vco, n, m, m1, m2, p, p1;
intel_p2_t p2;
bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
int, int, intel_clock_t *);
int, int, intel_clock_t *, intel_clock_t *);
};
 
/* FDI */
113,18 → 105,27
 
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
 
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
 
static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock);
 
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
386,6 → 387,152
.find_pll = intel_find_pll_ironlake_dp,
};
 
static const intel_limit_t intel_limits_vlv_dac = {
.dot = { .min = 25000, .max = 270000 },
.vco = { .min = 4000000, .max = 6000000 },
.n = { .min = 1, .max = 7 },
.m = { .min = 22, .max = 450 }, /* guess */
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
.p1 = { .min = 2, .max = 3 },
.p2 = { .dot_limit = 270000,
.p2_slow = 2, .p2_fast = 20 },
.find_pll = intel_vlv_find_best_pll,
};
 
static const intel_limit_t intel_limits_vlv_hdmi = {
.dot = { .min = 20000, .max = 165000 },
.vco = { .min = 4000000, .max = 5994000 },
.n = { .min = 1, .max = 7 },
.m = { .min = 60, .max = 300 }, /* guess */
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
.p1 = { .min = 2, .max = 3 },
.p2 = { .dot_limit = 270000,
.p2_slow = 2, .p2_fast = 20 },
.find_pll = intel_vlv_find_best_pll,
};
 
static const intel_limit_t intel_limits_vlv_dp = {
.dot = { .min = 162000, .max = 270000 },
.vco = { .min = 4000000, .max = 5994000 },
.n = { .min = 1, .max = 7 },
.m = { .min = 60, .max = 300 }, /* guess */
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
.p1 = { .min = 2, .max = 3 },
.p2 = { .dot_limit = 270000,
.p2_slow = 2, .p2_fast = 20 },
.find_pll = intel_vlv_find_best_pll,
};
 
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
unsigned long flags;
u32 val = 0;
 
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
goto out_unlock;
}
 
I915_WRITE(DPIO_REG, reg);
I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO read wait timed out\n");
goto out_unlock;
}
val = I915_READ(DPIO_DATA);
 
out_unlock:
spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
return val;
}
 
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
u32 val)
{
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
goto out_unlock;
}
 
I915_WRITE(DPIO_DATA, val);
I915_WRITE(DPIO_REG, reg);
I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
DRM_ERROR("DPIO write wait timed out\n");
 
out_unlock:
spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
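
/* Editor's sketch - not part of the original source: a hypothetical
* read-modify-write wrapper over the two DPIO accessors above, showing
* the intended usage pattern (each accessor already waits for DPIO idle
* and serializes on dpio_lock internally).
*/
static inline void intel_dpio_rmw(struct drm_i915_private *dev_priv,
int reg, u32 clear, u32 set)
{
u32 val = intel_dpio_read(dev_priv, reg);
val &= ~clear;
val |= set;
intel_dpio_write(dev_priv, reg, val);
}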
 
static void vlv_init_dpio(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Reset the DPIO config */
I915_WRITE(DPIO_CTL, 0);
POSTING_READ(DPIO_CTL);
I915_WRITE(DPIO_CTL, 1);
POSTING_READ(DPIO_CTL);
}
 
static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
{
DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
return 1;
}
 
static const struct dmi_system_id intel_dual_link_lvds[] = {
{
.callback = intel_dual_link_lvds_callback,
.ident = "Apple MacBook Pro (Core i5/i7 Series)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
},
},
{ } /* terminating entry */
};
 
static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
unsigned int reg)
{
unsigned int val;
 
/* use the module option value if specified */
if (i915_lvds_channel_mode > 0)
return i915_lvds_channel_mode == 2;
 
// if (dmi_check_system(intel_dual_link_lvds))
// return true;
 
if (dev_priv->lvds_val)
val = dev_priv->lvds_val;
else {
/* BIOS should set the proper LVDS register value at boot, but
* in reality, it doesn't set the value when the lid is closed;
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
val = I915_READ(reg);
if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
val = dev_priv->bios_lvds_val;
dev_priv->lvds_val = val;
}
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
 
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
394,8 → 541,7
const intel_limit_t *limit;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP) {
if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
/* LVDS dual channel */
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
423,8 → 569,7
const intel_limit_t *limit;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
if (is_dual_link_lvds(dev_priv, LVDS))
/* LVDS with dual channel */
limit = &intel_limits_g4x_dual_channel_lvds;
else
457,6 → 602,13
limit = &intel_limits_pineview_lvds;
else
limit = &intel_limits_pineview_sdvo;
} else if (IS_VALLEYVIEW(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
limit = &intel_limits_vlv_dac;
else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
limit = &intel_limits_vlv_hdmi;
else
limit = &intel_limits_vlv_dp;
} else if (!IS_GEN2(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i9xx_lvds;
498,11 → 650,10
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->base.crtc == crtc && encoder->type == type)
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->type == type)
return true;
 
return false;
545,7 → 696,8
 
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
 
{
struct drm_device *dev = crtc->dev;
561,8 → 713,7
* reliably set up different single/dual channel state, if we
* even can.
*/
if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
if (is_dual_link_lvds(dev_priv, LVDS))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
592,6 → 743,9
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
if (match_clock &&
clock.p != match_clock->p)
continue;
 
this_err = abs(clock.dot - target);
if (this_err < err) {
608,7 → 762,8
 
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
655,6 → 810,9
if (!intel_PLL_is_valid(dev, limit,
&clock))
continue;
if (match_clock &&
clock.p != match_clock->p)
continue;
 
this_err = abs(clock.dot - target);
if (this_err < err_most) {
672,7 → 830,8
 
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
698,7 → 857,8
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
intel_clock_t clock;
if (target < 200000) {
721,7 → 881,86
memcpy(best_clock, &clock, sizeof(intel_clock_t));
return true;
}
static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
intel_clock_t *best_clock)
{
u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
u32 m, n, fastclk;
u32 updrate, minupdate, fracbits, p;
unsigned long bestppm, ppm, absppm;
int dotclk, flag;
 
flag = 0;
dotclk = target * 1000;
bestppm = 1000000;
ppm = absppm = 0;
fastclk = dotclk / (2*100);
updrate = 0;
minupdate = 19200;
fracbits = 1;
n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
bestm1 = bestm2 = bestp1 = bestp2 = 0;
 
/* based on hardware requirement, prefer smaller n to precision */
for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
updrate = refclk / n;
for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
if (p2 > 10)
p2 = p2 - 1;
p = p1 * p2;
/* based on hardware requirement, prefer bigger m1,m2 values */
for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
m2 = (((2*(fastclk * p * n / m1 )) +
refclk) / (2*refclk));
m = m1 * m2;
vco = updrate * m;
if (vco >= limit->vco.min && vco < limit->vco.max) {
ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
absppm = (ppm > 0) ? ppm : (-ppm);
if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
bestppm = 0;
flag = 1;
}
if (absppm < bestppm - 10) {
bestppm = absppm;
flag = 1;
}
if (flag) {
bestn = n;
bestm1 = m1;
bestm2 = m2;
bestp1 = p1;
bestp2 = p2;
flag = 0;
}
}
}
}
}
}
best_clock->n = bestn;
best_clock->m1 = bestm1;
best_clock->m2 = bestm2;
best_clock->p1 = bestp1;
best_clock->p2 = bestp2;
 
return true;
}
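
/* Editor's sketch - not in the original: the candidate scoring used by the
* search above, factored out. The loop computes ppm in an unsigned long,
* which can wrap on 32-bit builds; this variant uses 64-bit signed math,
* an assumption made here so the absolute value stays well-defined.
*/
static inline unsigned long vlv_pll_ppm_error(u32 vco, u32 p, u32 fastclk)
{
long long ppm = 1000000LL * ((long long)(vco / p) - fastclk) / fastclk;
return (unsigned long)(ppm > 0 ? ppm : -ppm);
}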
 
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 frame, frame_reg = PIPEFRAME(pipe);
 
frame = I915_READ(frame_reg);
 
if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
DRM_DEBUG_KMS("vblank wait timed out\n");
}
 
/**
* intel_wait_for_vblank - wait for vblank on a given pipe
* @dev: drm device
735,6 → 974,11
struct drm_i915_private *dev_priv = dev->dev_private;
int pipestat_reg = PIPESTAT(pipe);
 
if (INTEL_INFO(dev)->gen >= 5) {
ironlake_wait_for_vblank(dev, pipe);
return;
}
 
/* Clear existing vblank status. Note this will clear any other
* sticky status fields as well.
*
785,20 → 1029,25
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
100))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
WARN(1, "pipe_off wait timed out\n");
} else {
u32 last_line;
u32 last_line, line_mask;
int reg = PIPEDSL(pipe);
unsigned long timeout = jiffies + msecs_to_jiffies(100);
unsigned long timeout = GetTimerTicks() + msecs_to_jiffies(100);
 
if (IS_GEN2(dev))
line_mask = DSL_LINEMASK_GEN2;
else
line_mask = DSL_LINEMASK_GEN3;
 
/* Wait for the display line to settle */
do {
last_line = I915_READ(reg) & DSL_LINEMASK;
last_line = I915_READ(reg) & line_mask;
mdelay(5);
} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
time_after(timeout, jiffies));
if (time_after(jiffies, timeout))
DRM_DEBUG_KMS("pipe_off wait timed out\n");
} while (((I915_READ(reg) & line_mask) != last_line) &&
time_after(timeout, GetTimerTicks()));
if (time_after(GetTimerTicks(), timeout))
WARN(1, "pipe_off wait timed out\n");
}
}
 
827,34 → 1076,49
 
/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
struct intel_pch_pll *pll,
struct intel_crtc *crtc,
bool state)
{
int reg;
u32 val;
bool cur_state;
 
if (HAS_PCH_CPT(dev_priv->dev)) {
u32 pch_dpll;
if (HAS_PCH_LPT(dev_priv->dev)) {
DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
return;
}
 
pch_dpll = I915_READ(PCH_DPLL_SEL);
if (WARN (!pll,
"asserting PCH PLL %s with no PLL\n", state_string(state)))
return;
 
/* Make sure the selected PLL is enabled to the transcoder */
WARN(!((pch_dpll >> (4 * pipe)) & 8),
"transcoder %d PLL not enabled\n", pipe);
val = I915_READ(pll->pll_reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
WARN(cur_state != state,
"PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
pll->pll_reg, state_string(state), state_string(cur_state), val);
 
/* Convert the transcoder pipe number to a pll pipe number */
pipe = (pch_dpll >> (4 * pipe)) & 1;
}
/* Make sure the selected PLL is correctly attached to the transcoder */
if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
u32 pch_dpll;
 
reg = PCH_DPLL(pipe);
val = I915_READ(reg);
cur_state = !!(val & DPLL_VCO_ENABLE);
pch_dpll = I915_READ(PCH_DPLL_SEL);
cur_state = pll->pll_reg == _PCH_DPLL_B;
if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
"PLL[%d] not attached to this transcoder %d: %08x\n",
cur_state, crtc->pipe, pch_dpll)) {
cur_state = !!(val >> (4*crtc->pipe + 3));
WARN(cur_state != state,
"PCH PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
"PLL[%d] not %s on this transcoder %d: %08x\n",
pll->pll_reg == _PCH_DPLL_B,
state_string(state),
crtc->pipe,
val);
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
}
}
#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
 
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
863,9 → 1127,16
u32 val;
bool cur_state;
 
if (IS_HASWELL(dev_priv->dev)) {
/* On Haswell, DDI is used instead of FDI_TX_CTL */
reg = DDI_FUNC_CTL(pipe);
val = I915_READ(reg);
cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
} else {
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
cur_state = !!(val & FDI_TX_ENABLE);
}
WARN(cur_state != state,
"FDI TX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
880,9 → 1151,14
u32 val;
bool cur_state;
 
if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
return;
} else {
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
cur_state = !!(val & FDI_RX_ENABLE);
}
WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
900,6 → 1176,10
if (dev_priv->info->gen == 5)
return;
 
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
if (IS_HASWELL(dev_priv->dev))
return;
 
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
911,6 → 1191,10
int reg;
u32 val;
 
if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
return;
}
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
952,6 → 1236,10
u32 val;
bool cur_state;
 
/* if we need the pipe A quirk it must be always on */
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
 
reg = PIPECONF(pipe);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
960,19 → 1248,24
pipe_name(pipe), state_string(state), state_string(cur_state));
}
 
static void assert_plane_enabled(struct drm_i915_private *dev_priv,
enum plane plane)
static void assert_plane(struct drm_i915_private *dev_priv,
enum plane plane, bool state)
{
int reg;
u32 val;
bool cur_state;
 
reg = DSPCNTR(plane);
val = I915_READ(reg);
WARN(!(val & DISPLAY_PLANE_ENABLE),
"plane %c assertion failure, should be active but is disabled\n",
plane_name(plane));
cur_state = !!(val & DISPLAY_PLANE_ENABLE);
WARN(cur_state != state,
"plane %c assertion failure (expected %s, current %s)\n",
plane_name(plane), state_string(state), state_string(cur_state));
}
 
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
 
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
981,8 → 1274,14
int cur_pipe;
 
/* Planes are fixed to pipes on ILK+ */
if (HAS_PCH_SPLIT(dev_priv->dev))
if (HAS_PCH_SPLIT(dev_priv->dev)) {
reg = DSPCNTR(pipe);
val = I915_READ(reg);
WARN((val & DISPLAY_PLANE_ENABLE),
"plane %c assertion failure, should be disabled but not\n",
plane_name(pipe));
return;
}
 
/* Need to check both planes against the pipe */
for (i = 0; i < 2; i++) {
1001,6 → 1300,11
u32 val;
bool enabled;
 
if (HAS_PCH_LPT(dev_priv->dev)) {
DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
return;
}
 
val = I915_READ(PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
1094,6 → 1398,10
WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
 
WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
&& (val & DP_PIPEB_SELECT),
"IBX PCH dp port still using transcoder B\n");
}
 
static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1100,9 → 1408,13
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
WARN(hdmi_pipe_enabled(dev_priv, val, pipe),
"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
reg, pipe_name(pipe));
 
WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
&& (val & SDVO_PIPE_B_SELECT),
"IBX PCH hdmi port still using transcoder B\n");
}
 
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1117,13 → 1429,13
 
reg = PCH_ADPA;
val = I915_READ(reg);
WARN(adpa_pipe_enabled(dev_priv, val, pipe),
WARN(adpa_pipe_enabled(dev_priv, pipe, val),
"PCH VGA enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
 
reg = PCH_LVDS;
val = I915_READ(reg);
WARN(lvds_pipe_enabled(dev_priv, val, pipe),
WARN(lvds_pipe_enabled(dev_priv, pipe, val),
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
 
1142,6 → 1454,8
* protect mechanism may be enabled.
*
* Note! This is for pre-ILK only.
*
* Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
*/
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
1149,7 → 1463,7
u32 val;
 
/* No really, not for ILK+ */
BUG_ON(dev_priv->info->gen >= 5);
BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
 
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1199,6 → 1513,69
POSTING_READ(reg);
}
 
/* SBI access */
static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
{
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
 
I915_WRITE(SBI_ADDR,
(reg << 16));
I915_WRITE(SBI_DATA,
value);
I915_WRITE(SBI_CTL_STAT,
SBI_BUSY |
SBI_CTL_OP_CRWR);
 
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
goto out_unlock;
}
 
out_unlock:
spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
 
static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
{
unsigned long flags;
u32 value = 0;
 
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
 
I915_WRITE(SBI_ADDR,
(reg << 16));
I915_WRITE(SBI_CTL_STAT,
SBI_BUSY |
SBI_CTL_OP_CRRD);
 
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
goto out_unlock;
}
 
value = I915_READ(SBI_DATA);
 
out_unlock:
spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
return value;
}
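
/* Editor's note: the two SBI accessors above follow the same
* wait-kick-wait protocol as the DPIO pair earlier in this file, and
* lpt_program_iclkip() below combines them into a read-modify-write of
* SBI_SSCCTL6 to set SBI_SSCCTL_DISABLE before reprogramming.
*/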
 
/**
* intel_enable_pch_pll - enable PCH PLL
* @dev_priv: i915 private structure
1207,60 → 1584,88
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
enum pipe pipe)
static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll;
int reg;
u32 val;
 
if (pipe > 1)
/* PCH PLLs only available on ILK, SNB and IVB */
BUG_ON(dev_priv->info->gen < 5);
pll = intel_crtc->pch_pll;
if (pll == NULL)
return;
 
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
if (WARN_ON(pll->refcount == 0))
return;
 
DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
pll->pll_reg, pll->active, pll->on,
intel_crtc->base.base.id);
 
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
 
reg = PCH_DPLL(pipe);
if (pll->active++ && pll->on) {
assert_pch_pll_enabled(dev_priv, pll, NULL);
return;
}
 
DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
 
reg = pll->pll_reg;
val = I915_READ(reg);
val |= DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
 
pll->on = true;
}
 
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
enum pipe pipe)
static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll = intel_crtc->pch_pll;
int reg;
u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
pll_sel = TRANSC_DPLL_ENABLE;
u32 val;
 
if (pipe > 1)
return;
 
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
if (pll == NULL)
return;
 
/* Make sure transcoder isn't still depending on us */
assert_transcoder_disabled(dev_priv, pipe);
if (WARN_ON(pll->refcount == 0))
return;
 
if (pipe == 0)
pll_sel |= TRANSC_DPLLA_SEL;
else if (pipe == 1)
pll_sel |= TRANSC_DPLLB_SEL;
DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
pll->pll_reg, pll->active, pll->on,
intel_crtc->base.base.id);
 
if (WARN_ON(pll->active == 0)) {
assert_pch_pll_disabled(dev_priv, pll, NULL);
return;
}
 
if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
if (--pll->active) {
assert_pch_pll_enabled(dev_priv, pll, NULL);
return;
}
 
reg = PCH_DPLL(pipe);
DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
 
/* Make sure transcoder isn't still depending on us */
assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
 
reg = pll->pll_reg;
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
udelay(200);
 
pll->on = false;
}
 
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1267,20 → 1672,28
enum pipe pipe)
{
int reg;
u32 val;
u32 val, pipeconf_val;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
 
/* Make sure PCH DPLL is enabled */
assert_pch_pll_enabled(dev_priv, pipe);
assert_pch_pll_enabled(dev_priv,
to_intel_crtc(crtc)->pch_pll,
to_intel_crtc(crtc));
 
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
 
if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
return;
}
reg = TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
 
if (HAS_PCH_IBX(dev_priv->dev)) {
/*
1288,8 → 1701,19
* that in pipeconf reg.
*/
val &= ~PIPE_BPC_MASK;
val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
val |= pipeconf_val & PIPE_BPC_MASK;
}
 
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
if (HAS_PCH_IBX(dev_priv->dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
else
val |= TRANS_PROGRESSIVE;
 
I915_WRITE(reg, val | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
1397,6 → 1821,7
 
I915_WRITE(reg, val & ~PIPECONF_ENABLE);
intel_wait_for_pipe_off(dev_priv->dev, pipe);
 
}
 
/*
1403,7 → 1828,7
* Plane regs are double buffered, going from enabled->disabled needs a
* trigger in order to latch. The display address reg provides this.
*/
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1461,500 → 1886,6
intel_wait_for_vblank(dev_priv->dev, pipe);
}
 
static void disable_pch_dp(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg, u32 port_sel)
{
u32 val = I915_READ(reg);
if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
I915_WRITE(reg, val & ~DP_PORT_EN);
}
}
 
static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
enum pipe pipe, int reg)
{
u32 val = I915_READ(reg);
if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
reg, pipe);
I915_WRITE(reg, val & ~PORT_ENABLE);
}
}
 
/* Disable any ports connected to this transcoder */
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
u32 reg, val;
 
val = I915_READ(PCH_PP_CONTROL);
I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
 
disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
 
reg = PCH_ADPA;
val = I915_READ(reg);
if (adpa_pipe_enabled(dev_priv, val, pipe))
I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
 
reg = PCH_LVDS;
val = I915_READ(reg);
if (lvds_pipe_enabled(dev_priv, val, pipe)) {
DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
I915_WRITE(reg, val & ~LVDS_PORT_EN);
POSTING_READ(reg);
udelay(100);
}
 
disable_pch_hdmi(dev_priv, pipe, HDMIB);
disable_pch_hdmi(dev_priv, pipe, HDMIC);
disable_pch_hdmi(dev_priv, pipe, HDMID);
}
 
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
 
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
return;
 
fbc_ctl &= ~FBC_CTL_EN;
I915_WRITE(FBC_CONTROL, fbc_ctl);
 
/* Wait for compressing bit to clear */
if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
 
DRM_DEBUG_KMS("disabled FBC\n");
}
 
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int plane, i;
u32 fbc_ctl, fbc_ctl2;
 
cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
 
/* FBC_CTL wants 64B units */
cfb_pitch = (cfb_pitch / 64) - 1;
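/* Editor's example - not in the original: a 1920-pixel-wide XRGB scanout
* has a 7680-byte pitch, which encodes as 7680 / 64 - 1 = 119.
*/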
plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
 
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
 
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
fbc_ctl2 |= plane;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
 
/* enable it... */
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
 
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
cfb_pitch, crtc->y, intel_crtc->plane);
}
 
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
 
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
 
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
 
I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
 
/* enable it... */
I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
 
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
 
static void g4x_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
 
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
 
DRM_DEBUG_KMS("disabled FBC\n");
}
}
 
static bool g4x_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
 
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blt_ecoskpd;
 
/* Make sure blitter notifies FBC of writes */
gen6_gt_force_wake_get(dev_priv);
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
gen6_gt_force_wake_put(dev_priv);
}
 
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
 
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
/* Set persistent mode for front-buffer rendering, ala X. */
dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
 
I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
if (IS_GEN6(dev)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
sandybridge_blit_fbc_update(dev);
}
 
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
 
static void ironlake_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
 
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 
DRM_DEBUG_KMS("disabled FBC\n");
}
}
 
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
 
bool intel_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!dev_priv->display.fbc_enabled)
return false;
 
return dev_priv->display.fbc_enabled(dev);
}
 
 
 
 
 
 
 
 
 
 
static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!dev_priv->display.enable_fbc)
return;
 
// intel_cancel_fbc_work(dev_priv);
 
// work = kzalloc(sizeof *work, GFP_KERNEL);
// if (work == NULL) {
// dev_priv->display.enable_fbc(crtc, interval);
// return;
// }
 
// work->crtc = crtc;
// work->fb = crtc->fb;
// work->interval = interval;
// INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
// dev_priv->fbc_work = work;
 
DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
 
/* Delay the actual enabling to let pageflipping cease and the
* display to settle before starting the compression. Note that
* this delay also serves a second purpose: it allows for a
* vblank to pass after disabling the FBC before we attempt
* to modify the control registers.
*
* A more complicated solution would involve tracking vblanks
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
*/
// schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
 
void intel_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
// intel_cancel_fbc_work(dev_priv);
 
if (!dev_priv->display.disable_fbc)
return;
 
dev_priv->display.disable_fbc(dev);
dev_priv->cfb_plane = -1;
}
 
/**
* intel_update_fbc - enable/disable FBC as needed
* @dev: the drm_device
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
* - plane A only (on pre-965)
* - no pixel multiply/line duplication
* - no alpha buffer discard
* - no dual wide
* - framebuffer <= 2048 in width, 1536 in height
*
* We can't assume that any compression will take place (worst case),
* so the compressed buffer has to be the same size as the uncompressed
* one. It also must reside (along with the line length buffer) in
* stolen memory.
*
* We need to enable/disable FBC on a global basis.
*/
static void intel_update_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL, *tmp_crtc;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int enable_fbc;
 
DRM_DEBUG_KMS("\n");
 
if (!i915_powersave)
return;
 
if (!I915_HAS_FBC(dev))
return;
 
/*
* If FBC is already on, we just have to verify that we can
* keep it that way...
* Need to disable if:
* - more than one pipe is active
* - changing FBC params (stride, fence, mode)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
if (tmp_crtc->enabled && tmp_crtc->fb) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
goto out_disable;
}
crtc = tmp_crtc;
}
}
 
if (!crtc || crtc->fb == NULL) {
DRM_DEBUG_KMS("no output, disabling\n");
dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
goto out_disable;
}
 
intel_crtc = to_intel_crtc(crtc);
fb = crtc->fb;
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
 
enable_fbc = i915_enable_fbc;
if (enable_fbc < 0) {
DRM_DEBUG_KMS("fbc set to per-chip default\n");
enable_fbc = 1;
if (INTEL_INFO(dev)->gen <= 6)
enable_fbc = 0;
}
if (!enable_fbc) {
DRM_DEBUG_KMS("fbc disabled per module param\n");
dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
if (intel_fb->obj->base.size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
goto out_disable;
}
if ((crtc->mode.hdisplay > 2048) ||
(crtc->mode.vdisplay > 1536)) {
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
 
/* The use of a CPU fence is mandatory in order to detect writes
* by the CPU to the scanout and trigger updates to the FBC.
*/
// if (obj->tiling_mode != I915_TILING_X ||
// obj->fence_reg == I915_FENCE_REG_NONE) {
// DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
// dev_priv->no_fbc_reason = FBC_NOT_TILED;
// goto out_disable;
// }
 
/* If the kernel debugger is active, always disable compression */
if (in_dbg_master())
goto out_disable;
 
/* If the scanout has not changed, don't modify the FBC settings.
* Note that we make the fundamental assumption that the fb->obj
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
if (dev_priv->cfb_plane == intel_crtc->plane &&
dev_priv->cfb_fb == fb->base.id &&
dev_priv->cfb_y == crtc->y)
return;
 
if (intel_fbc_enabled(dev)) {
/* We update FBC along two paths, after changing fb/crtc
* configuration (modeswitching) and after page-flipping
* finishes. For the latter, we know that not only did
* we disable the FBC at the start of the page-flip
* sequence, but also more than one vblank has passed.
*
* For the former case of modeswitching, it is possible
* to switch between two FBC valid configurations
* instantaneously so we do need to disable the FBC
* before we can modify its control registers. We also
* have to wait for the next vblank for that to take
* effect. However, since we delay enabling FBC we can
* assume that a vblank has passed since disabling and
* that we can safely alter the registers in the deferred
* callback.
*
* In the scenario that we go from a valid to invalid
* and then back to valid FBC configuration we have
* no strict enforcement that a vblank occurred since
* disabling the FBC. However, along all current pipe
* disabling paths we do need to wait for a vblank at
* some point. And we wait before enabling FBC anyway.
*/
DRM_DEBUG_KMS("disabling active FBC for update\n");
intel_disable_fbc(dev);
}
 
intel_enable_fbc(crtc, 500);
return;
 
out_disable:
/* Multiple disables should be harmless */
if (intel_fbc_enabled(dev)) {
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
intel_disable_fbc(dev);
}
}
 
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
2011,6 → 1942,28
return ret;
}
 
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
// i915_gem_object_unpin_fence(obj);
// i915_gem_object_unpin(obj);
}
 
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */
static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
unsigned int bpp,
unsigned int pitch)
{
int tile_rows, tiles;
 
tile_rows = *y / 8;
*y %= 8;
tiles = *x / (512/bpp);
*x %= 512/bpp;
 
return tile_rows * pitch * 8 + tiles * 4096;
}
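
/* Editor's worked example - not in the original: with 4 bytes per pixel an
* X tile is 512 bytes (128 pixels) wide and 8 rows tall. For x = 200,
* y = 30, pitch = 4096: tile_rows = 3 (y becomes 6), tiles = 1 (x becomes
* 72), so the function returns 3 * 4096 * 8 + 1 * 4096 = 102400 bytes.
*/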
 
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y)
{
2020,7 → 1973,7
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long Start, Offset;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
 
2067,18 → 2020,28
 
I915_WRITE(reg, dspcntr);
 
Start = obj->gtt_offset;
Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
 
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
Start, Offset, x, y, fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
gen4_compute_dspaddr_offset_xtiled(&x, &y,
fb->bits_per_pixel / 8,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
} else {
intel_crtc->dspaddr_offset = linear_offset;
}
 
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPSURF(plane), Start);
I915_MODIFY_DISPBASE(DSPSURF(plane),
obj->gtt_offset + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
I915_WRITE(DSPADDR(plane), Start + Offset);
I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
POSTING_READ(reg);
 
return 0;
2093,7 → 2056,7
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long Start, Offset;
unsigned long linear_offset;
u32 dspcntr;
u32 reg;
 
2148,15 → 2111,20
 
I915_WRITE(reg, dspcntr);
 
Start = obj->gtt_offset;
Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
intel_crtc->dspaddr_offset =
gen4_compute_dspaddr_offset_xtiled(&x, &y,
fb->bits_per_pixel / 8,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
 
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
Start, Offset, x, y, fb->pitches[0]);
DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane), Start);
I915_MODIFY_DISPBASE(DSPSURF(plane),
obj->gtt_offset + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
I915_WRITE(DSPLINOFF(plane), linear_offset);
POSTING_READ(reg);
 
return 0;
2169,54 → 2137,83
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
intel_increase_pllclock(crtc);
 
return dev_priv->display.update_plane(crtc, fb, x, y);
}
 
#if 0
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
 
ret = dev_priv->display.update_plane(crtc, fb, x, y);
if (ret)
return ret;
wait_event(dev_priv->pending_flip_queue,
atomic_read(&dev_priv->mm.wedged) ||
atomic_read(&obj->pending_flip) == 0);
 
intel_update_fbc(dev);
intel_increase_pllclock(crtc);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
*
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
dev_priv->mm.interruptible = false;
ret = i915_gem_object_finish_gpu(obj);
dev_priv->mm.interruptible = was_interruptible;
 
return 0;
return ret;
}
#endif
 
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_framebuffer *old_fb;
int ret;
 
ENTER();
 
/* no fb bound */
if (!crtc->fb) {
if (!fb) {
DRM_ERROR("No FB bound\n");
return 0;
}
 
switch (intel_crtc->plane) {
case 0:
case 1:
break;
case 2:
if (IS_IVYBRIDGE(dev))
break;
/* fall through otherwise */
default:
DRM_ERROR("no plane for crtc\n");
if (intel_crtc->plane >= dev_priv->num_pipe) {
DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
intel_crtc->plane,
dev_priv->num_pipe);
return -EINVAL;
}
 
mutex_lock(&dev->struct_mutex);
// ret = intel_pin_and_fence_fb_obj(dev,
// to_intel_framebuffer(fb)->obj,
// NULL);
// if (ret != 0) {
// mutex_unlock(&dev->struct_mutex);
// DRM_ERROR("pin & fence failed\n");
// return ret;
// }
 
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
LEAVE_ATOMIC_MODE_SET);
// if (crtc->fb)
// intel_finish_fb(crtc->fb);
 
ret = dev_priv->display.update_plane(crtc, fb, x, y);
if (ret) {
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
mutex_unlock(&dev->struct_mutex);
DRM_ERROR("failed to update base address\n");
LEAVE();
2223,13 → 2220,20
return ret;
}
 
old_fb = crtc->fb;
crtc->fb = fb;
crtc->x = x;
crtc->y = y;
 
if (old_fb) {
intel_wait_for_vblank(dev, intel_crtc->pipe);
intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
}
 
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
 
LEAVE();
return 0;
 
 
}
 
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2433,7 → 2437,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, i;
u32 reg, temp, i, retry;
 
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
2485,16 → 2489,20
POSTING_READ(reg);
udelay(500);
 
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
if (temp & FDI_RX_BIT_LOCK) {
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
DRM_DEBUG_KMS("FDI train 1 done.\n");
break;
}
udelay(50);
}
if (retry < 5)
break;
}
if (i == 4)
DRM_ERROR("FDI train 1 fail!\n");
 
2534,16 → 2542,20
POSTING_READ(reg);
udelay(500);
 
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
temp = I915_READ(reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
if (temp & FDI_RX_SYMBOL_LOCK) {
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
udelay(50);
}
if (retry < 5)
break;
}
if (i == 4)
DRM_ERROR("FDI train 2 fail!\n");
 
2664,11 → 2676,10
DRM_DEBUG_KMS("FDI train done.\n");
}
 
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp;
 
2694,6 → 2705,9
POSTING_READ(reg);
udelay(200);
 
/* On Haswell, the PLL configuration for ports and pipes is handled
* separately, as part of DDI setup */
if (!IS_HASWELL(dev)) {
/* Enable CPU FDI TX PLL, always on for Ironlake */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
2704,7 → 2718,37
udelay(100);
}
}
}
 
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = intel_crtc->pipe;
u32 reg, temp;
 
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp & ~FDI_PCDCLK);
 
/* Disable CPU FDI TX PLL */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
 
POSTING_READ(reg);
udelay(100);
 
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
 
/* Wait for the clocks to turn off. */
POSTING_READ(reg);
udelay(100);
}
 
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
2774,57 → 2818,71
udelay(100);
}
 
/*
* When we disable a pipe, we need to clear any pending scanline wait events
* to avoid hanging the ring, which we assume we are waiting on.
*/
static void intel_clear_scanline_wait(struct drm_device *dev)
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
u32 tmp;
unsigned long flags;
bool pending;
 
if (IS_GEN2(dev))
/* Can't break the hang on i8xx */
return;
if (atomic_read(&dev_priv->mm.wedged))
return false;
 
ring = LP_RING(dev_priv);
tmp = I915_READ_CTL(ring);
if (tmp & RING_WAIT)
I915_WRITE_CTL(ring, tmp);
spin_lock_irqsave(&dev->event_lock, flags);
pending = to_intel_crtc(crtc)->unpin_work != NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
 
return pending;
}
 
#if 0
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_i915_gem_object *obj;
struct drm_i915_private *dev_priv;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (crtc->fb == NULL)
return;
 
obj = to_intel_framebuffer(crtc->fb)->obj;
dev_priv = crtc->dev->dev_private;
wait_event(dev_priv->pending_flip_queue,
atomic_read(&obj->pending_flip) == 0);
!intel_crtc_has_pending_flip(crtc));
 
mutex_lock(&dev->struct_mutex);
intel_finish_fb(crtc->fb);
mutex_unlock(&dev->struct_mutex);
}
#endif
 
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
struct intel_encoder *intel_encoder;
 
/*
* If there's a non-PCH eDP on this crtc, it must be DP_A, and that
* must be driven by its own crtc; no sharing is possible.
*/
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
continue;
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
 
switch (encoder->type) {
/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
* CPU handles all others */
if (IS_HASWELL(dev)) {
/* It is still unclear how this will work on PPT, so throw up a warning */
WARN_ON(!HAS_PCH_LPT(dev));
 
if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
return true;
} else {
DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
intel_encoder->type);
return false;
}
}
 
switch (intel_encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&encoder->base))
if (!intel_encoder_is_pch_edp(&intel_encoder->base))
return false;
continue;
}
2833,6 → 2891,97
return true;
}
 
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
 
/* It is necessary to ungate the pixclk gate prior to programming
* the divisors, and gate it back when it is done.
*/
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
 
/* Disable SSCCTL */
intel_sbi_write(dev_priv, SBI_SSCCTL6,
intel_sbi_read(dev_priv, SBI_SSCCTL6) |
SBI_SSCCTL_DISABLE);
 
/* 20MHz is a corner case which is out of range for the 7-bit divisor */
if (crtc->mode.clock == 20000) {
auxdiv = 1;
divsel = 0x41;
phaseinc = 0x20;
} else {
/* The iCLK virtual clock root frequency is in MHz,
* but the crtc->mode.clock is in KHz. To get the divisors,
* it is necessary to divide one by another, so we
* convert the virtual clock precision to KHz here for higher
* precision.
*/
u32 iclk_virtual_root_freq = 172800 * 1000;
u32 iclk_pi_range = 64;
u32 desired_divisor, msb_divisor_value, pi_value;
 
desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
msb_divisor_value = desired_divisor / iclk_pi_range;
pi_value = desired_divisor % iclk_pi_range;
 
auxdiv = 0;
divsel = msb_divisor_value - 2;
phaseinc = pi_value;
}
 
/* This should not happen with any sane values */
WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
~SBI_SSCDIVINTPHASE_INCVAL_MASK);
 
DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
crtc->mode.clock,
auxdiv,
divsel,
phasedir,
phaseinc);
 
/* Program SSCDIVINTPHASE6 */
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
 
intel_sbi_write(dev_priv,
SBI_SSCDIVINTPHASE6,
temp);
 
/* Program SSCAUXDIV */
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
intel_sbi_write(dev_priv,
SBI_SSCAUXDIV6,
temp);
 
 
/* Enable modulator and associated divider */
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
temp &= ~SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv,
SBI_SSCCTL6,
temp);
 
/* Wait for initialization time */
udelay(24);
 
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
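To make the divisor split above concrete, here is the same integer arithmetic rerun outside the driver for an illustrative 148500 kHz (1080p) pixel clock; the 172.8 GHz virtual root and the 64-step phase-increment range come from the function, everything else is example input:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clock = 148500;                         /* crtc->mode.clock, in kHz */
	uint32_t iclk_virtual_root_freq = 172800 * 1000; /* 172.8 GHz virtual root, in kHz */
	uint32_t iclk_pi_range = 64;

	uint32_t desired_divisor = iclk_virtual_root_freq / clock;    /* 1163 */
	uint32_t msb_divisor_value = desired_divisor / iclk_pi_range; /* 18 */
	uint32_t pi_value = desired_divisor % iclk_pi_range;          /* 11 */

	uint32_t divsel = msb_divisor_value - 2; /* 7-bit integer divider field: 16 */
	uint32_t phaseinc = pi_value;            /* phase-increment field: 11 */

	printf("divsel=0x%x phaseinc=0x%x\n", divsel, phaseinc);
	return 0;
}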
 
/*
* Enable PCH resources required for PCH ports:
* - PCH PLLs
2847,29 → 2996,41
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, transc_sel;
u32 reg, temp;
 
assert_transcoder_disabled(dev_priv, pipe);
 
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
 
intel_enable_pch_pll(dev_priv, pipe);
intel_enable_pch_pll(intel_crtc);
 
if (HAS_PCH_CPT(dev)) {
transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
TRANSC_DPLLB_SEL;
if (HAS_PCH_LPT(dev)) {
DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
lpt_program_iclkip(crtc);
} else if (HAS_PCH_CPT(dev)) {
u32 sel;
 
/* Be sure PCH DPLL SEL is set */
temp = I915_READ(PCH_DPLL_SEL);
if (pipe == 0) {
temp &= ~(TRANSA_DPLLB_SEL);
temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
} else if (pipe == 1) {
temp &= ~(TRANSB_DPLLB_SEL);
temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
} else if (pipe == 2) {
temp &= ~(TRANSC_DPLLB_SEL);
temp |= (TRANSC_DPLL_ENABLE | transc_sel);
switch (pipe) {
default:
case 0:
temp |= TRANSA_DPLL_ENABLE;
sel = TRANSA_DPLLB_SEL;
break;
case 1:
temp |= TRANSB_DPLL_ENABLE;
sel = TRANSB_DPLLB_SEL;
break;
case 2:
temp |= TRANSC_DPLL_ENABLE;
sel = TRANSC_DPLLB_SEL;
break;
}
if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
temp |= sel;
else
temp &= ~sel;
I915_WRITE(PCH_DPLL_SEL, temp);
}
 
2882,7 → 3043,9
I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
 
if (!IS_HASWELL(dev))
intel_fdi_normal_train(crtc);
 
/* For PCH DP, enable TRANS_DP_CTL */
2926,6 → 3089,93
intel_enable_transcoder(dev_priv, pipe);
}
 
static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
{
struct intel_pch_pll *pll = intel_crtc->pch_pll;
 
if (pll == NULL)
return;
 
if (pll->refcount == 0) {
WARN(1, "bad PCH PLL refcount\n");
return;
}
 
--pll->refcount;
intel_crtc->pch_pll = NULL;
}
 
static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll;
int i;
 
pll = intel_crtc->pch_pll;
if (pll) {
DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
intel_crtc->base.base.id, pll->pll_reg);
goto prepare;
}
 
if (HAS_PCH_IBX(dev_priv->dev)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = intel_crtc->pipe;
pll = &dev_priv->pch_plls[i];
 
DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
intel_crtc->base.base.id, pll->pll_reg);
 
goto found;
}
 
for (i = 0; i < dev_priv->num_pch_pll; i++) {
pll = &dev_priv->pch_plls[i];
 
/* Only want to check enabled timings first */
if (pll->refcount == 0)
continue;
 
if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
fp == I915_READ(pll->fp0_reg)) {
DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
intel_crtc->base.base.id,
pll->pll_reg, pll->refcount, pll->active);
 
goto found;
}
}
 
/* Ok no matching timings, maybe there's a free one? */
for (i = 0; i < dev_priv->num_pch_pll; i++) {
pll = &dev_priv->pch_plls[i];
if (pll->refcount == 0) {
DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
intel_crtc->base.base.id, pll->pll_reg);
goto found;
}
}
 
return NULL;
 
found:
intel_crtc->pch_pll = pll;
pll->refcount++;
DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
prepare: /* separate function? */
DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
 
/* Wait for the clocks to stabilize before rewriting the regs */
I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(pll->pll_reg);
udelay(150);
 
I915_WRITE(pll->fp0_reg, fp);
I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
pll->on = false;
return pll;
}
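The lookup order in intel_get_pch_pll() is: keep the PLL the CRTC already holds, use the fixed pipe mapping on IBX, share a busy PLL whose DPLL/FP programming matches exactly, and only then claim a free slot. A compilable sketch of that share-or-allocate policy over a generic refcounted pool; the types and names here are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

struct pll {
	uint32_t dpll, fp; /* programmed timings */
	int refcount;      /* CRTCs currently sharing this PLL */
};

#define NUM_PLLS 2

static struct pll *get_pll(struct pll *pool, uint32_t dpll, uint32_t fp)
{
	int i;

	/* first pass: share an in-use PLL with identical settings */
	for (i = 0; i < NUM_PLLS; i++)
		if (pool[i].refcount && pool[i].dpll == dpll && pool[i].fp == fp) {
			pool[i].refcount++;
			return &pool[i];
		}

	/* second pass: claim any free slot */
	for (i = 0; i < NUM_PLLS; i++)
		if (pool[i].refcount == 0) {
			pool[i].dpll = dpll;
			pool[i].fp = fp;
			pool[i].refcount = 1;
			return &pool[i];
		}

	return NULL; /* every PLL is busy with different timings */
}

int main(void)
{
	struct pll pool[NUM_PLLS] = {{0}};

	get_pll(pool, 0x1234, 0x56);               /* allocates slot 0 */
	get_pll(pool, 0x1234, 0x56);               /* shares slot 0 */
	printf("refcount=%d\n", pool[0].refcount); /* 2 */
	return 0;
}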
 
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
2949,11 → 3199,14
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 temp;
bool is_pch_port;
 
WARN_ON(!crtc->enabled);
 
if (intel_crtc->active)
return;
 
2968,11 → 3221,17
 
is_pch_port = intel_crtc_driving_pch(crtc);
 
if (is_pch_port)
ironlake_fdi_pll_enable(crtc);
else
ironlake_fdi_disable(crtc);
if (is_pch_port) {
ironlake_fdi_pll_enable(intel_crtc);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
assert_fdi_rx_disabled(dev_priv, pipe);
}
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
 
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
3002,6 → 3261,22
mutex_unlock(&dev->struct_mutex);
 
// intel_crtc_update_cursor(crtc, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
if (HAS_PCH_CPT(dev))
intel_cpt_verify_modeset(dev, intel_crtc->pipe);
 
/*
* There seems to be a race in PCH platform hw (at least on some
* outputs) where an enabled pipe still completes any pageflip right
* away (as if the pipe is off) instead of waiting for vblank. As soon
* as the first vblank happened, everything works as expected. Hence just
* wait for one vblank before returning to avoid strange things
* happening.
*/
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
 
static void ironlake_crtc_disable(struct drm_crtc *crtc)
3009,16 → 3284,19
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
 
 
if (!intel_crtc->active)
return;
 
ENTER();
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
intel_crtc_wait_for_pending_flips(crtc);
// intel_crtc_wait_for_pending_flips(crtc);
// drm_vblank_off(dev, pipe);
// intel_crtc_update_cursor(crtc, false);
 
3033,15 → 3311,12
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
 
ironlake_fdi_disable(crtc);
 
/* This is a horrible layering violation; we should be doing this in
* the connector/encoder ->prepare instead, but we don't always have
* enough information there about the config to know whether it will
* actually be necessary or just cause undesired flicker.
*/
intel_disable_pch_ports(dev_priv, pipe);
 
intel_disable_transcoder(dev_priv, pipe);
 
if (HAS_PCH_CPT(dev)) {
3072,65 → 3347,24
}
 
/* disable PCH DPLL */
if (!intel_crtc->no_pll)
intel_disable_pch_pll(dev_priv, pipe);
intel_disable_pch_pll(intel_crtc);
 
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp & ~FDI_PCDCLK);
ironlake_fdi_pll_disable(intel_crtc);
 
/* Disable CPU FDI TX PLL */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
 
POSTING_READ(reg);
udelay(100);
 
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
 
/* Wait for the clocks to turn off. */
POSTING_READ(reg);
udelay(100);
 
intel_crtc->active = false;
intel_update_watermarks(dev);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
intel_clear_scanline_wait(dev);
mutex_unlock(&dev->struct_mutex);
 
LEAVE();
 
}
 
static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
*/
switch (mode) {
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
ironlake_crtc_enable(crtc);
break;
 
case DRM_MODE_DPMS_OFF:
DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
ironlake_crtc_disable(crtc);
break;
intel_put_pch_pll(intel_crtc);
}
}
 
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
3155,9 → 3389,12
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
WARN_ON(!crtc->enabled);
 
if (intel_crtc->active)
return;
 
3174,6 → 3411,9
/* Give the overlay scaler a chance to enable if it's on this pipe */
intel_crtc_dpms_overlay(intel_crtc, true);
// intel_crtc_update_cursor(crtc, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
}
 
static void i9xx_crtc_disable(struct drm_crtc *crtc)
3181,14 → 3421,19
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
 
 
if (!intel_crtc->active)
return;
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
/* Give the overlay scaler a chance to disable if it's on this pipe */
intel_crtc_wait_for_pending_flips(crtc);
// intel_crtc_wait_for_pending_flips(crtc);
// drm_vblank_off(dev, pipe);
intel_crtc_dpms_overlay(intel_crtc, false);
// intel_crtc_update_cursor(crtc, false);
3203,45 → 3448,21
intel_crtc->active = false;
intel_update_fbc(dev);
intel_update_watermarks(dev);
intel_clear_scanline_wait(dev);
}
 
static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
*/
switch (mode) {
case DRM_MODE_DPMS_ON:
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
i9xx_crtc_enable(crtc);
break;
case DRM_MODE_DPMS_OFF:
i9xx_crtc_disable(crtc);
break;
}
}
 
/**
* Sets the power management mode of the pipe and plane.
*/
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
static void intel_crtc_update_sarea(struct drm_crtc *crtc,
bool enabled)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
bool enabled;
 
if (intel_crtc->dpms_mode == mode)
return;
 
intel_crtc->dpms_mode = mode;
 
dev_priv->display.dpms(crtc, mode);
 
#if 0
if (!dev->primary->master)
return;
3250,8 → 3471,6
if (!master_priv->sarea_priv)
return;
 
enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
 
switch (pipe) {
case 0:
master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3269,79 → 3488,177
 
}
 
/**
* Sets the power management mode of the pipe and plane.
*/
void intel_crtc_update_dpms(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
bool enable = false;
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder)
enable |= intel_encoder->connectors_active;
 
if (enable)
dev_priv->display.crtc_enable(crtc);
else
dev_priv->display.crtc_disable(crtc);
 
intel_crtc_update_sarea(crtc, enable);
}
 
static void intel_crtc_noop(struct drm_crtc *crtc)
{
}
 
static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
 
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
 
if (crtc->fb) {
mutex_lock(&dev->struct_mutex);
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
dev_priv->display.crtc_disable(crtc);
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
 
assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
 
// if (crtc->fb) {
// mutex_lock(&dev->struct_mutex);
// intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
// mutex_unlock(&dev->struct_mutex);
// crtc->fb = NULL;
// }
 
/* Update computed state. */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder || !connector->encoder->crtc)
continue;
 
if (connector->encoder->crtc != crtc)
continue;
 
connector->dpms = DRM_MODE_DPMS_OFF;
to_intel_encoder(connector->encoder)->connectors_active = false;
}
}
 
/* Prepare for a mode set.
*
* Note we could be a lot smarter here. We need to figure out which outputs
* will be enabled, which disabled (in short, how the config will change)
* and perform the minimum necessary steps to accomplish that, e.g. updating
* watermarks, FBC configuration, making sure PLLs are programmed correctly,
* panel fitting is in the proper state, etc.
*/
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
void intel_modeset_disable(struct drm_device *dev)
{
i9xx_crtc_disable(crtc);
struct drm_crtc *crtc;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->enabled)
intel_crtc_disable(crtc);
}
}
 
static void i9xx_crtc_commit(struct drm_crtc *crtc)
void intel_encoder_noop(struct drm_encoder *encoder)
{
i9xx_crtc_enable(crtc);
}
 
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
void intel_encoder_destroy(struct drm_encoder *encoder)
{
ironlake_crtc_disable(crtc);
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
 
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
}
 
static void ironlake_crtc_commit(struct drm_crtc *crtc)
/* Simple dpms helper for encoders with just one connector, no cloning and only
* one kind of off state. It clamps all !ON modes to fully OFF and changes the
* state of the entire output pipe. */
void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
ironlake_crtc_enable(crtc);
if (mode == DRM_MODE_DPMS_ON) {
encoder->connectors_active = true;
 
intel_crtc_update_dpms(encoder->base.crtc);
} else {
encoder->connectors_active = false;
 
intel_crtc_update_dpms(encoder->base.crtc);
}
}
 
void intel_encoder_prepare(struct drm_encoder *encoder)
/* Cross check the actual hw state with our own modeset state tracking (and its
* internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
/* lvds has its own version of prepare see intel_lvds_prepare */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
if (connector->get_hw_state(connector)) {
struct intel_encoder *encoder = connector->encoder;
struct drm_crtc *crtc;
bool encoder_enabled;
enum pipe pipe;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.base.id,
drm_get_connector_name(&connector->base));
 
WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
"wrong connector dpms state\n");
WARN(connector->base.encoder != &encoder->base,
"active connector not linked to encoder\n");
WARN(!encoder->connectors_active,
"encoder->connectors_active not set\n");
 
encoder_enabled = encoder->get_hw_state(encoder, &pipe);
WARN(!encoder_enabled, "encoder not enabled\n");
if (WARN_ON(!encoder->base.crtc))
return;
 
crtc = encoder->base.crtc;
 
WARN(!crtc->enabled, "crtc not enabled\n");
WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
WARN(pipe != to_intel_crtc(crtc)->pipe,
"encoder active on the wrong pipe\n");
}
}
 
void intel_encoder_commit(struct drm_encoder *encoder)
/* Even simpler default implementation, if there's really no special case to
* consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
struct drm_device *dev = encoder->dev;
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
struct intel_encoder *encoder = intel_attached_encoder(connector);
 
/* lvds has its own version of commit see intel_lvds_commit */
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
/* All the simple cases only support two dpms states. */
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
 
if (HAS_PCH_CPT(dev))
intel_cpt_verify_modeset(dev, intel_crtc->pipe);
if (mode == connector->dpms)
return;
 
connector->dpms = mode;
 
/* Only need to change hw state when actually enabled */
if (encoder->base.crtc)
intel_encoder_dpms(encoder, mode);
else
WARN_ON(encoder->connectors_active != false);
 
intel_modeset_check_state(connector->dev);
}
 
void intel_encoder_destroy(struct drm_encoder *encoder)
/* Simple connector->get_hw_state implementation for encoders that support only
* one connector and no cloning and hence the encoder state determines the state
* of the connector. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
enum pipe pipe = 0;
struct intel_encoder *encoder = connector->encoder;
 
drm_encoder_cleanup(encoder);
kfree(intel_encoder);
return encoder->get_hw_state(encoder, &pipe);
}
 
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
3352,15 → 3669,27
return false;
}
 
/* XXX some encoders set the crtcinfo, others don't.
* Obviously we need some form of conflict resolution here...
*/
if (adjusted_mode->crtc_htotal == 0)
/* All interlaced capable intel hw wants timings in frames. Note though
* that intel_lvds_mode_fixup does some funny tricks with the crtc
* timings, so we need to be careful not to clobber these. */
if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
drm_mode_set_crtcinfo(adjusted_mode, 0);
 
/* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
* with a hsync front porch of 0.
*/
if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
adjusted_mode->hsync_start == adjusted_mode->hdisplay)
return false;
 
return true;
}
 
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
return 400000; /* FIXME */
}
 
static int i945_get_display_clock_speed(struct drm_device *dev)
{
return 400000;
3458,1323 → 3787,6
fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
 
 
struct intel_watermark_params {
unsigned long fifo_size;
unsigned long max_wm;
unsigned long default_wm;
unsigned long guard_size;
unsigned long cacheline_size;
};
 
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_HPLLOFF_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
G4X_FIFO_SIZE,
G4X_MAX_WM,
G4X_MAX_WM,
2,
G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
I945_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
I915_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
I855GM_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
I830_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
};
 
static const struct intel_watermark_params ironlake_display_wm_info = {
ILK_DISPLAY_FIFO,
ILK_DISPLAY_MAXWM,
ILK_DISPLAY_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
ILK_CURSOR_FIFO,
ILK_CURSOR_MAXWM,
ILK_CURSOR_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
ILK_DISPLAY_SR_FIFO,
ILK_DISPLAY_MAX_SRWM,
ILK_DISPLAY_DFT_SRWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_CURSOR_SR_FIFO,
ILK_CURSOR_MAX_SRWM,
ILK_CURSOR_DFT_SRWM,
2,
ILK_FIFO_LINE_SIZE
};
 
static const struct intel_watermark_params sandybridge_display_wm_info = {
SNB_DISPLAY_FIFO,
SNB_DISPLAY_MAXWM,
SNB_DISPLAY_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
SNB_CURSOR_FIFO,
SNB_CURSOR_MAXWM,
SNB_CURSOR_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
SNB_DISPLAY_SR_FIFO,
SNB_DISPLAY_MAX_SRWM,
SNB_DISPLAY_DFT_SRWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
SNB_CURSOR_SR_FIFO,
SNB_CURSOR_MAX_SRWM,
SNB_CURSOR_DFT_SRWM,
2,
SNB_FIFO_LINE_SIZE
};
 
 
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
* @wm: chip FIFO params
* @pixel_size: display pixel size
* @latency_ns: memory latency for the platform
*
* Calculate the watermark level (the level at which the display plane will
* start fetching from memory again). Each chip has a different display
* FIFO size and allocation, so the caller needs to figure that out and pass
* in the correct intel_watermark_params structure.
*
* As the pixel clock runs, the FIFO will be drained at a rate that depends
* on the pixel size. When it reaches the watermark level, it'll start
* fetching FIFO line sized chunks from memory until the FIFO fills
* past the watermark point. If the FIFO drains completely, a FIFO underrun
* will occur, and a display engine hang could result.
*/
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
const struct intel_watermark_params *wm,
int fifo_size,
int pixel_size,
unsigned long latency_ns)
{
long entries_required, wm_size;
 
/*
* Note: we need to make sure we don't overflow for various clock &
* latency values.
* clocks go from a few thousand to several hundred thousand.
* latency is usually a few thousand
*/
entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
1000;
entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
 
DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
 
wm_size = fifo_size - (entries_required + wm->guard_size);
 
DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
 
/* Don't promote wm_size to unsigned... */
if (wm_size > (long)wm->max_wm)
wm_size = wm->max_wm;
if (wm_size <= 0)
wm_size = wm->default_wm;
return wm_size;
}
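As a worked example of the arithmetic above, with illustrative numbers not taken from any particular platform: a 148.5 MHz dot clock at 32 bpp with 5000 ns latency drains 148 * 4 * 5 = 2960 bytes while memory latency elapses, which rounds up to 47 cachelines of 64 bytes; with a hypothetical 96-entry FIFO and a guard size of 2, the programmed level is 96 - (47 + 2) = 47:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	long clock_in_khz = 148500, pixel_size = 4, latency_ns = 5000;
	long cacheline = 64, fifo_size = 96, guard_size = 2; /* illustrative FIFO params */

	long entries = ((clock_in_khz / 1000) * pixel_size * latency_ns) / 1000;
	entries = DIV_ROUND_UP(entries, cacheline); /* bytes -> FIFO lines: 47 */

	long wm = fifo_size - (entries + guard_size); /* refill trigger level: 47 */
	printf("entries=%ld wm=%ld\n", entries, wm);
	return 0;
}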
 
struct cxsr_latency {
int is_desktop;
int is_ddr3;
unsigned long fsb_freq;
unsigned long mem_freq;
unsigned long display_sr;
unsigned long display_hpll_disable;
unsigned long cursor_sr;
unsigned long cursor_hpll_disable;
};
 
static const struct cxsr_latency cxsr_latency_table[] = {
{1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
{1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
{1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
{1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
{1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
 
{1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
{1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
{1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
{1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
{1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
 
{1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
{1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
{1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
{1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
{1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
 
{0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
{0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
{0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
{0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
{0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
 
{0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
{0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
{0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
{0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
{0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
 
{0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
{0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
{0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
{0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
 
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
int is_ddr3,
int fsb,
int mem)
{
const struct cxsr_latency *latency;
int i;
 
if (fsb == 0 || mem == 0)
return NULL;
 
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
latency = &cxsr_latency_table[i];
if (is_desktop == latency->is_desktop &&
is_ddr3 == latency->is_ddr3 &&
fsb == latency->fsb_freq && mem == latency->mem_freq)
return latency;
}
 
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
 
return NULL;
}
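Usage is a plain linear match on the four keys; for example, a desktop DDR3 system with an 800 MHz FSB and 800 MHz memory resolves to the {1, 1, 800, 800, 5902, ...} row above, so the display_sr latency is 5902. A standalone miniature with two rows copied from the table:

#include <stddef.h>
#include <stdio.h>

struct row { int desktop, ddr3; long fsb, mem, display_sr; };

static const struct row table[] = {
	{1, 0, 800, 400, 3382}, /* DDR2-400 SC, desktop */
	{1, 1, 800, 800, 5902}, /* DDR3-800 SC, desktop */
};

static const struct row *lookup(int desktop, int ddr3, long fsb, long mem)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].desktop == desktop && table[i].ddr3 == ddr3 &&
		    table[i].fsb == fsb && table[i].mem == mem)
			return &table[i];
	return NULL; /* unknown FSB/MEM combination disables CxSR */
}

int main(void)
{
	const struct row *r = lookup(1, 1, 800, 800);

	if (r)
		printf("display_sr=%ld\n", r->display_sr); /* 5902 */
	return 0;
}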
 
static void pineview_disable_cxsr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* deactivate cxsr */
I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
 
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
* - chipset
* - current MCH state
* It can be fairly high in some situations, so here we assume a fairly
* pessimal value. It's a tradeoff between extra memory fetches (if we
* set this value too high, the FIFO will fetch frequently to stay full)
* and power consumption (set it too low to save power and we might see
* FIFO underruns and display "flicker").
*
* A value of 5us seems to be a good balance; safe for very low end
* platforms but not overly aggressive on lower latency configs.
*/
static const int latency_ns = 5000;
 
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
 
size = dsparb & 0x7f;
if (plane)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
 
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A", size);
 
return size;
}
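DSPARB packs the FIFO split between the two planes as two end offsets: the low 7 bits give where plane A's allocation ends, and the field at DSPARB_CSTART_SHIFT (assumed to be bit 7 in this sketch) gives where plane B's ends, so B's size is the difference. Decoding a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define CSTART_SHIFT 7 /* assumed value of DSPARB_CSTART_SHIFT */

int main(void)
{
	uint32_t dsparb = (96u << CSTART_SHIFT) | 48u; /* A ends at 48, B at 96 */

	int size_a = dsparb & 0x7f;                              /* 48 entries */
	int size_b = ((dsparb >> CSTART_SHIFT) & 0x7f) - size_a; /* 96 - 48 = 48 */

	printf("A: %d, B: %d\n", size_a, size_b);
	return 0;
}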
 
static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
 
size = dsparb & 0x1ff;
if (plane)
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
 
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A", size);
 
return size;
}
 
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
 
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
 
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A",
size);
 
return size;
}
 
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
 
size = dsparb & 0x7f;
size >>= 1; /* Convert to cachelines */
 
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A", size);
 
return size;
}
 
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
struct drm_crtc *crtc, *enabled = NULL;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->enabled && crtc->fb) {
if (enabled)
return NULL;
enabled = crtc;
}
}
 
return enabled;
}
 
static void pineview_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
 
latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
pineview_disable_cxsr(dev);
return;
}
 
crtc = single_enabled_crtc(dev);
if (crtc) {
int clock = crtc->mode.clock;
int pixel_size = crtc->fb->bits_per_pixel / 8;
 
/* Display SR */
wm = intel_calculate_wm(clock, &pineview_display_wm,
pineview_display_wm.fifo_size,
pixel_size, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= wm << DSPFW_SR_SHIFT;
I915_WRITE(DSPFW1, reg);
DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
 
/* cursor SR */
wm = intel_calculate_wm(clock, &pineview_cursor_wm,
pineview_display_wm.fifo_size,
pixel_size, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
I915_WRITE(DSPFW3, reg);
 
/* Display HPLL off SR */
wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
reg |= wm & DSPFW_HPLL_SR_MASK;
I915_WRITE(DSPFW3, reg);
 
/* cursor HPLL off SR */
wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 
/* activate cxsr */
I915_WRITE(DSPFW3,
I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
DRM_DEBUG_KMS("Self-refresh is enabled\n");
} else {
pineview_disable_cxsr(dev);
DRM_DEBUG_KMS("Self-refresh is disabled\n");
}
}
 
static bool g4x_compute_wm0(struct drm_device *dev,
int plane,
const struct intel_watermark_params *display,
int display_latency_ns,
const struct intel_watermark_params *cursor,
int cursor_latency_ns,
int *plane_wm,
int *cursor_wm)
{
struct drm_crtc *crtc;
int htotal, hdisplay, clock, pixel_size;
int line_time_us, line_count;
int entries, tlb_miss;
 
crtc = intel_get_crtc_for_plane(dev, plane);
if (crtc->fb == NULL || !crtc->enabled) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
}
 
htotal = crtc->mode.htotal;
hdisplay = crtc->mode.hdisplay;
clock = crtc->mode.clock;
pixel_size = crtc->fb->bits_per_pixel / 8;
 
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
entries = DIV_ROUND_UP(entries, display->cacheline_size);
*plane_wm = entries + display->guard_size;
if (*plane_wm > (int)display->max_wm)
*plane_wm = display->max_wm;
 
/* Use the large buffer method to calculate cursor watermark */
line_time_us = ((htotal * 1000) / clock);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
entries = line_count * 64 * pixel_size;
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
if (*cursor_wm > (int)cursor->max_wm)
*cursor_wm = (int)cursor->max_wm;
 
return true;
}
 
/*
* Check the wm result.
*
* If any calculated watermark value is larger than the maximum value that
* can be programmed into the associated watermark register, that watermark
* must be disabled.
*/
static bool g4x_check_srwm(struct drm_device *dev,
int display_wm, int cursor_wm,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor)
{
DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
display_wm, cursor_wm);
 
if (display_wm > display->max_wm) {
DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
display_wm, display->max_wm);
return false;
}
 
if (cursor_wm > cursor->max_wm) {
DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
cursor_wm, cursor->max_wm);
return false;
}
 
if (!(display_wm || cursor_wm)) {
DRM_DEBUG_KMS("SR latency is 0, disabling\n");
return false;
}
 
return true;
}
 
static bool g4x_compute_srwm(struct drm_device *dev,
int plane,
int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *display_wm, int *cursor_wm)
{
struct drm_crtc *crtc;
int hdisplay, htotal, pixel_size, clock;
unsigned long line_time_us;
int line_count, line_size;
int small, large;
int entries;
 
if (!latency_ns) {
*display_wm = *cursor_wm = 0;
return false;
}
 
crtc = intel_get_crtc_for_plane(dev, plane);
hdisplay = crtc->mode.hdisplay;
htotal = crtc->mode.htotal;
clock = crtc->mode.clock;
pixel_size = crtc->fb->bits_per_pixel / 8;
 
line_time_us = (htotal * 1000) / clock;
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
 
/* Use the minimum of the small and large buffer method for primary */
small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
large = line_count * line_size;
 
entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
*display_wm = entries + display->guard_size;
 
/* calculate the self-refresh watermark for display cursor */
entries = line_count * pixel_size * 64;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
 
return g4x_check_srwm(dev,
*display_wm, *cursor_wm,
display, cursor);
}
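With sample 1080p-like timings the two sizing methods come out close, and the minimum wins. Rerunning the arithmetic standalone, using a 148500 kHz clock, htotal 2200, hdisplay 1920, 32 bpp and the 12000 ns SR latency used below (all illustrative inputs):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	long clock = 148500, htotal = 2200, hdisplay = 1920;
	long pixel_size = 4, latency_ns = 12000, cacheline = 64;

	long line_time_us = (htotal * 1000) / clock;                 /* 14 us */
	long line_count = (latency_ns / line_time_us + 1000) / 1000; /* 1 line */
	long line_size = hdisplay * pixel_size;                      /* 7680 bytes */

	long small = ((clock * pixel_size / 1000) * latency_ns) / 1000; /* 7128 bytes */
	long large = line_count * line_size;                            /* 7680 bytes */

	long entries = DIV_ROUND_UP(small < large ? small : large, cacheline);
	printf("entries=%ld\n", entries); /* 112 FIFO lines, before the guard */
	return 0;
}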
 
#define single_plane_enabled(mask) is_power_of_2(mask)
 
static void g4x_update_wm(struct drm_device *dev)
{
static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
 
if (g4x_compute_wm0(dev, 0,
&g4x_wm_info, latency_ns,
&g4x_cursor_wm_info, latency_ns,
&planea_wm, &cursora_wm))
enabled |= 1;
 
if (g4x_compute_wm0(dev, 1,
&g4x_wm_info, latency_ns,
&g4x_cursor_wm_info, latency_ns,
&planeb_wm, &cursorb_wm))
enabled |= 2;
 
plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&g4x_wm_info,
&g4x_cursor_wm_info,
&plane_sr, &cursor_sr))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
else
I915_WRITE(FW_BLC_SELF,
I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
planeb_wm, cursorb_wm,
plane_sr, cursor_sr);
 
I915_WRITE(DSPFW1,
(plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
 
static void i965_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
 
/* Calc sr entries for one plane configs */
crtc = single_enabled_crtc(dev);
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
int clock = crtc->mode.clock;
int htotal = crtc->mode.htotal;
int hdisplay = crtc->mode.hdisplay;
int pixel_size = crtc->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
 
line_time_us = ((htotal * 1000) / clock);
 
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * hdisplay;
entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
srwm = I965_FIFO_SIZE - entries;
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
entries, srwm);
 
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * 64;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
(entries + i965_cursor_wm_info.guard_size);
 
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
 
DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
 
if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
/* Turn off self refresh if both pipes are enabled */
if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
& ~FW_BLC_SELF_EN);
}
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
srwm);
 
/* 965 has limitations... */
I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
(8 << 16) | (8 << 8) | (8 << 0));
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
 
static void i9xx_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
uint32_t fwater_hi;
int cwm, srwm = 1;
int fifo_size;
int planea_wm, planeb_wm;
struct drm_crtc *crtc, *enabled = NULL;
 
if (IS_I945GM(dev))
wm_info = &i945_wm_info;
else if (!IS_GEN2(dev))
wm_info = &i915_wm_info;
else
wm_info = &i855_wm_info;
 
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
if (crtc->enabled && crtc->fb) {
planea_wm = intel_calculate_wm(crtc->mode.clock,
wm_info, fifo_size,
crtc->fb->bits_per_pixel / 8,
latency_ns);
enabled = crtc;
} else
planea_wm = fifo_size - wm_info->guard_size;
 
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
if (crtc->enabled && crtc->fb) {
planeb_wm = intel_calculate_wm(crtc->mode.clock,
wm_info, fifo_size,
crtc->fb->bits_per_pixel / 8,
latency_ns);
if (enabled == NULL)
enabled = crtc;
else
enabled = NULL;
} else
planeb_wm = fifo_size - wm_info->guard_size;
 
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
/*
* Overlay gets an aggressive default since video jitter is bad.
*/
cwm = 2;
 
/* Play safe and disable self-refresh before adjusting watermarks. */
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
else if (IS_I915GM(dev))
I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
 
/* Calc sr entries for one plane configs */
if (HAS_FW_BLC(dev) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
int clock = enabled->mode.clock;
int htotal = enabled->mode.htotal;
int hdisplay = enabled->mode.hdisplay;
int pixel_size = enabled->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
 
line_time_us = (htotal * 1000) / clock;
 
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * hdisplay;
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
 
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else if (IS_I915GM(dev))
I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
}
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
planea_wm, planeb_wm, cwm, srwm);
 
fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
fwater_hi = (cwm & 0x1f);
 
/* Set request length to 8 cachelines per fetch */
fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
fwater_hi = fwater_hi | (1 << 8);
 
I915_WRITE(FW_BLC, fwater_lo);
I915_WRITE(FW_BLC2, fwater_hi);
 
if (HAS_FW_BLC(dev)) {
if (enabled) {
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
else if (IS_I915GM(dev))
I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
DRM_DEBUG_KMS("memory self refresh enabled\n");
} else
DRM_DEBUG_KMS("memory self refresh disabled\n");
}
}
 
static void i830_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
uint32_t fwater_lo;
int planea_wm;
 
crtc = single_enabled_crtc(dev);
if (crtc == NULL)
return;
 
planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
crtc->fb->bits_per_pixel / 8,
latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
 
I915_WRITE(FW_BLC, fwater_lo);
}
 
#define ILK_LP0_PLANE_LATENCY 700
#define ILK_LP0_CURSOR_LATENCY 1300
 
/*
* Check the wm result.
*
* If any calculated watermark value is larger than the maximum value that
* can be programmed into the associated watermark register, that watermark
* must be disabled.
*/
static bool ironlake_check_srwm(struct drm_device *dev, int level,
int fbc_wm, int display_wm, int cursor_wm,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
" cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
 
if (fbc_wm > SNB_FBC_MAX_SRWM) {
DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
fbc_wm, SNB_FBC_MAX_SRWM, level);
 
/* fbc has its own way to disable FBC WM */
I915_WRITE(DISP_ARB_CTL,
I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
return false;
}
 
if (display_wm > display->max_wm) {
DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
display_wm, SNB_DISPLAY_MAX_SRWM, level);
return false;
}
 
if (cursor_wm > cursor->max_wm) {
DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
cursor_wm, SNB_CURSOR_MAX_SRWM, level);
return false;
}
 
if (!(fbc_wm || display_wm || cursor_wm)) {
DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
return false;
}
 
return true;
}
 
/*
* Compute watermark values for WM[1-3].
*/
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *fbc_wm, int *display_wm, int *cursor_wm)
{
struct drm_crtc *crtc;
unsigned long line_time_us;
int hdisplay, htotal, pixel_size, clock;
int line_count, line_size;
int small, large;
int entries;
 
if (!latency_ns) {
*fbc_wm = *display_wm = *cursor_wm = 0;
return false;
}
 
crtc = intel_get_crtc_for_plane(dev, plane);
hdisplay = crtc->mode.hdisplay;
htotal = crtc->mode.htotal;
clock = crtc->mode.clock;
pixel_size = crtc->fb->bits_per_pixel / 8;
 
line_time_us = (htotal * 1000) / clock;
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
 
/* Use the minimum of the small and large buffer method for primary */
small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
large = line_count * line_size;
 
entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
*display_wm = entries + display->guard_size;
 
/*
* Spec says:
* FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
*/
*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
 
/* calculate the self-refresh watermark for display cursor */
entries = line_count * pixel_size * 64;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
 
return ironlake_check_srwm(dev, level,
*fbc_wm, *display_wm, *cursor_wm,
display, cursor);
}
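Plugging sample numbers into the spec formula quoted above: an assumed final primary WM of 114 on a 1920-pixel, 32 bpp line (7680 bytes) gives ceil(114 * 64 / 7680) + 2 = 3 FBC lines; the inputs are illustrative:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int display_wm = 114;     /* assumed final primary watermark */
	int line_size = 1920 * 4; /* bytes per line at 32 bpp */

	int fbc_wm = DIV_ROUND_UP(display_wm * 64, line_size) + 2;
	printf("fbc_wm=%d\n", fbc_wm); /* 3 */
	return 0;
}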
 
static void ironlake_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
 
enabled = 0;
if (g4x_compute_wm0(dev, 0,
&ironlake_display_wm_info,
ILK_LP0_PLANE_LATENCY,
&ironlake_cursor_wm_info,
ILK_LP0_CURSOR_LATENCY,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1;
}
 
if (g4x_compute_wm0(dev, 1,
&ironlake_display_wm_info,
ILK_LP0_PLANE_LATENCY,
&ironlake_cursor_wm_info,
ILK_LP0_CURSOR_LATENCY,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 2;
}
 
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*/
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
if (!single_plane_enabled(enabled))
return;
enabled = ffs(enabled) - 1;
 
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
ILK_READ_WM1_LATENCY() * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
(ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
ILK_READ_WM2_LATENCY() * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
(ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/*
* WM3 is unsupported on ILK, probably because we don't have latency
* data for that power state
*/
}
 
void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
 
enabled = 0;
if (g4x_compute_wm0(dev, 0,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1;
}
 
if (g4x_compute_wm0(dev, 1,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 2;
}
 
/* IVB has 3 pipes */
if (IS_IVYBRIDGE(dev) &&
g4x_compute_wm0(dev, 2,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEC_IVB,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1 << 2; /* pipe C */
}
 
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*
* SNB supports 3 levels of watermark.
*
* WM1/WM2/WM3 watermarks have to be enabled in ascending order,
* and disabled in descending order
*
*/
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
if (!single_plane_enabled(enabled) ||
dev_priv->sprite_scaling_enabled)
return;
enabled = ffs(enabled) - 1;
 
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
SNB_READ_WM1_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
(SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
SNB_READ_WM2_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
(SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM3 */
if (!ironlake_compute_srwm(dev, 3, enabled,
SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM3_LP_ILK,
WM3_LP_EN |
(SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
}
 
static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
uint32_t sprite_width, int pixel_size,
const struct intel_watermark_params *display,
int display_latency_ns, int *sprite_wm)
{
struct drm_crtc *crtc;
int clock;
int entries, tlb_miss;
 
crtc = intel_get_crtc_for_plane(dev, plane);
if (crtc->fb == NULL || !crtc->enabled) {
*sprite_wm = display->guard_size;
return false;
}
 
clock = crtc->mode.clock;
 
/* Use the small buffer method to calculate the sprite watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
tlb_miss = display->fifo_size*display->cacheline_size -
sprite_width * 8;
if (tlb_miss > 0)
entries += tlb_miss;
entries = DIV_ROUND_UP(entries, display->cacheline_size);
*sprite_wm = entries + display->guard_size;
if (*sprite_wm > (int)display->max_wm)
*sprite_wm = display->max_wm;
 
return true;
}
 
static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
uint32_t sprite_width, int pixel_size,
const struct intel_watermark_params *display,
int latency_ns, int *sprite_wm)
{
struct drm_crtc *crtc;
unsigned long line_time_us;
int clock;
int line_count, line_size;
int small, large;
int entries;
 
if (!latency_ns) {
*sprite_wm = 0;
return false;
}
 
crtc = intel_get_crtc_for_plane(dev, plane);
clock = crtc->mode.clock;
 
line_time_us = (sprite_width * 1000) / clock;
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = sprite_width * pixel_size;
 
/* Use the minimum of the small and large buffer method for primary */
small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
large = line_count * line_size;
 
entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
*sprite_wm = entries + display->guard_size;
 
return *sprite_wm > 0x3ff ? false : true;
}
 
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
int sprite_wm, reg;
int ret;
 
switch (pipe) {
case 0:
reg = WM0_PIPEA_ILK;
break;
case 1:
reg = WM0_PIPEB_ILK;
break;
case 2:
reg = WM0_PIPEC_IVB;
break;
default:
return; /* bad pipe */
}
 
ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
&sandybridge_display_wm_info,
latency, &sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
pipe);
return;
}
 
I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
 
 
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
SNB_READ_WM1_LATENCY() * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
pipe);
return;
}
I915_WRITE(WM1S_LP_ILK, sprite_wm);
 
/* Only IVB has two more LP watermarks for sprite */
if (!IS_IVYBRIDGE(dev))
return;
 
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
SNB_READ_WM2_LATENCY() * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
pipe);
return;
}
I915_WRITE(WM2S_LP_IVB, sprite_wm);
 
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
SNB_READ_WM3_LATENCY() * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
pipe);
return;
}
I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
 
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
* Calculate watermark values for the various WM regs based on current mode
* and plane configuration.
*
* There are several cases to deal with here:
* - normal (i.e. non-self-refresh)
* - self-refresh (SR) mode
* - lines are large relative to FIFO size (buffer can hold up to 2)
* - lines are small relative to FIFO size (buffer can hold more than 2
* lines), so need to account for TLB latency
*
* The normal calculation is:
* watermark = dotclock * bytes per pixel * latency
* where latency is platform & configuration dependent (we assume pessimal
* values here).
*
* The SR calculation is:
* watermark = (trunc(latency/line time)+1) * surface width *
* bytes per pixel
* where
* line time = htotal / dotclock
* surface width = hdisplay for normal plane and 64 for cursor
* and latency is assumed to be high, as above.
*
* The final value programmed to the register should always be rounded up,
* and include an extra 2 entries to account for clock crossings.
*
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
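* (A standalone sketch of both formulas follows the function below.)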
*/
static void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->display.update_wm)
dev_priv->display.update_wm(dev);
}
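 
/*
 * A minimal standalone sketch of the two formulas documented above.
 * Parameter names, units (kHz dot clock, ns latency) and the example
 * scaling are assumptions for illustration only.
 */
static int normal_wm_bytes(int dotclock_khz, int bytes_pp, int latency_ns)
{
	/* watermark = dotclock * bytes per pixel * latency */
	return ((dotclock_khz * bytes_pp / 1000) * latency_ns) / 1000;
}
 
static int sr_wm_bytes(int latency_ns, int htotal, int hdisplay,
		       int dotclock_khz, int bytes_pp)
{
	/* line time = htotal / dotclock */
	int line_time_us = (htotal * 1000) / dotclock_khz;
 
	/* watermark = (trunc(latency / line time) + 1) * width * bytes pp */
	return (latency_ns / (line_time_us * 1000) + 1) * hdisplay * bytes_pp;
}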
 
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->display.update_sprite_wm)
dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
pixel_size);
}
 
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_panel_use_ssc >= 0)
4805,22 → 3817,19
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
unsigned int *pipe_bpp,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_encoder *encoder;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
unsigned int display_bpc = UINT_MAX, bpc;
 
/* Walk the encoders & connectors on this crtc, get min bpc */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
 
if (encoder->crtc != crtc)
continue;
 
if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
unsigned int lvds_bpc;
 
4837,21 → 3846,10
continue;
}
 
if (intel_encoder->type == INTEL_OUTPUT_EDP) {
/* Use VBT settings if we have an eDP panel */
unsigned int edp_bpc = dev_priv->edp.bpp / 3;
 
if (edp_bpc < display_bpc) {
DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
display_bpc = edp_bpc;
}
continue;
}
 
/* Not one of the known troublemakers, check the EDID */
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
if (connector->encoder != encoder)
if (connector->encoder != &intel_encoder->base)
continue;
 
/* Don't use an invalid EDID bpc value */
4889,7 → 3887,7
* also stays within the max display bpc discovered above.
*/
 
switch (crtc->fb->depth) {
switch (fb->depth) {
case 8:
bpc = 8; /* since we go through a colormap */
break;
4922,61 → 3920,38
return display_bpc != bpc;
}
 
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
static int vlv_get_refclk(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
u32 temp;
u32 lvds_sync = 0;
int refclk = 27000; /* for DP & HDMI */
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
continue;
return 100000; /* only one validated so far */
 
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_DVO:
is_dvo = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
case INTEL_OUTPUT_ANALOG:
is_crt = true;
break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
refclk = 96000;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv))
refclk = 100000;
else
refclk = 96000;
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
refclk = 100000;
}
 
num_connectors++;
return refclk;
}
 
if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int refclk;
 
if (IS_VALLEYVIEW(dev)) {
refclk = vlv_get_refclk(crtc);
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
refclk = dev_priv->lvds_ssc_freq * 1000;
DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
refclk / 1000);
4986,74 → 3961,195
refclk = 48000;
}
 
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
return refclk;
}
 
/* Ensure that the cursor is valid for the new mode before changing... */
// intel_crtc_update_cursor(crtc, true);
 
if (is_lvds && dev_priv->lvds_downclock_avail) {
has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
&reduced_clock);
if (has_reduced_clock && (clock.p != reduced_clock.p)) {
/*
* If the different P is found, it means that we can't
* switch the display clock by using the FP0/FP1.
* In such case we will disable the LVDS downclock
* feature.
*/
DRM_DEBUG_KMS("Different P is found for "
"LVDS clock/downclock\n");
has_reduced_clock = 0;
}
}
static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
intel_clock_t *clock)
{
/* SDVO TV has fixed PLL values that depend on its clock range;
this mirrors the VBIOS setting. */
if (is_sdvo && is_tv) {
if (adjusted_mode->clock >= 100000
&& adjusted_mode->clock < 140500) {
clock.p1 = 2;
clock.p2 = 10;
clock.n = 3;
clock.m1 = 16;
clock.m2 = 8;
clock->p1 = 2;
clock->p2 = 10;
clock->n = 3;
clock->m1 = 16;
clock->m2 = 8;
} else if (adjusted_mode->clock >= 140500
&& adjusted_mode->clock <= 200000) {
clock.p1 = 1;
clock.p2 = 10;
clock.n = 6;
clock.m1 = 12;
clock.m2 = 8;
clock->p1 = 1;
clock->p2 = 10;
clock->n = 6;
clock->m1 = 12;
clock->m2 = 8;
}
}
 
static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
intel_clock_t *clock,
intel_clock_t *reduced_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 fp, fp2 = 0;
 
if (IS_PINEVIEW(dev)) {
fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = (1 << reduced_clock.n) << 16 |
reduced_clock.m1 << 8 | reduced_clock.m2;
fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
if (reduced_clock)
fp2 = (1 << reduced_clock->n) << 16 |
reduced_clock->m1 << 8 | reduced_clock->m2;
} else {
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
reduced_clock.m2;
fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
if (reduced_clock)
fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
reduced_clock->m2;
}
 
I915_WRITE(FP0(pipe), fp);
 
intel_crtc->lowfreq_avail = false;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915_powersave) {
I915_WRITE(FP1(pipe), fp2);
intel_crtc->lowfreq_avail = true;
} else {
I915_WRITE(FP1(pipe), fp);
}
}
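 
/*
 * Illustration of the FP0/FP1 packing used above: N in bits 23:16
 * (stored as a power of two on Pineview), M1 in bits 15:8, M2 in
 * bits 7:0. Divider values are assumed for the example.
 */
static void example_fp_packing(void)
{
	unsigned int n = 3, m1 = 16, m2 = 8; /* assumed dividers */
 
	unsigned int fp     = n << 16 | m1 << 8 | m2;         /* 0x031008 */
	unsigned int fp_pnv = (1u << n) << 16 | m1 << 8 | m2; /* 0x081008 */
 
	(void)fp;
	(void)fp_pnv;
}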
 
static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 temp;
 
temp = I915_READ(LVDS);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (pipe == 1) {
temp |= LVDS_PIPEB_SELECT;
} else {
temp &= ~LVDS_PIPEB_SELECT;
}
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
if (clock->p2 == 7)
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
 
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
/* set the dithering flag on LVDS as needed */
if (INTEL_INFO(dev)->gen >= 4) {
if (dev_priv->lvds_dither)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
}
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
temp |= LVDS_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(LVDS, temp);
}
 
static void vlv_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock, intel_clock_t *reduced_clock,
int refclk, int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 dpll, mdiv, pdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
bool is_hdmi;
 
is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
 
bestn = clock->n;
bestm1 = clock->m1;
bestm2 = clock->m2;
bestp1 = clock->p1;
bestp2 = clock->p2;
 
/* Enable DPIO clock input */
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
I915_WRITE(DPLL(pipe), dpll);
POSTING_READ(DPLL(pipe));
 
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
mdiv |= ((bestn << DPIO_N_SHIFT));
mdiv |= (1 << DPIO_POST_DIV_SHIFT);
mdiv |= (1 << DPIO_K_SHIFT);
mdiv |= DPIO_ENABLE_CALIBRATION;
intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
 
intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
 
pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
(8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
 
intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);
 
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll);
POSTING_READ(DPLL(pipe));
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
 
if (is_hdmi) {
u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
 
if (temp > 1)
temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
else
temp = 0;
 
I915_WRITE(DPLL_MD(pipe), temp);
POSTING_READ(DPLL_MD(pipe));
}
 
intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
}
 
static void i9xx_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock, intel_clock_t *reduced_clock,
int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 dpll;
bool is_sdvo;
 
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
 
dpll = DPLL_VGA_MODE_DIS;
 
if (!IS_GEN2(dev)) {
if (is_lvds)
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
dpll |= DPLLB_MODE_LVDS;
else
dpll |= DPLLB_MODE_DAC_SERIAL;
5065,18 → 4161,18
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
if (is_dp)
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
dpll |= DPLL_DVO_HIGH_SPEED;
 
/* compute bitmask from p1 value */
if (IS_PINEVIEW(dev))
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
else {
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
if (IS_G4X(dev) && has_reduced_clock)
dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
if (IS_G4X(dev) && reduced_clock)
dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
}
switch (clock.p2) {
switch (clock->p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
5092,30 → 4188,210
}
if (INTEL_INFO(dev)->gen >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
 
if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
 
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(DPLL(pipe));
udelay(150);
 
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
intel_update_lvds(crtc, clock, adjusted_mode);
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
intel_dp_set_m_n(crtc, mode, adjusted_mode);
 
I915_WRITE(DPLL(pipe), dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(DPLL(pipe));
udelay(150);
 
if (INTEL_INFO(dev)->gen >= 4) {
u32 temp = 0;
if (is_sdvo) {
temp = intel_mode_get_pixel_multiplier(adjusted_mode);
if (temp > 1)
temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
else
temp = 0;
}
I915_WRITE(DPLL_MD(pipe), temp);
} else {
if (is_lvds) {
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(DPLL(pipe), dpll);
}
}
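 
/*
 * Sketch of the DPLL_MD pixel-multiplier encoding written above: the
 * field stores (multiplier - 1), or 0 when no multiplication is
 * needed. The shift value is restated here as an assumption.
 */
#define EXAMPLE_UDI_MULTIPLIER_SHIFT 8 /* assumed DPLL_MD_UDI_MULTIPLIER_SHIFT */
 
static unsigned int example_dpll_md(unsigned int pixel_multiplier)
{
	if (pixel_multiplier > 1)
		return (pixel_multiplier - 1) << EXAMPLE_UDI_MULTIPLIER_SHIFT;
	return 0;
}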
 
static void i8xx_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock,
int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 dpll;
 
dpll = DPLL_VGA_MODE_DIS;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
} else {
if (clock.p1 == 2)
if (clock->p1 == 2)
dpll |= PLL_P1_DIVIDE_BY_TWO;
else
dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
if (clock.p2 == 4)
dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
if (clock->p2 == 4)
dpll |= PLL_P2_DIVIDE_BY_4;
}
}
 
if (is_sdvo && is_tv)
dpll |= PLL_REF_INPUT_TVCLKINBC;
else if (is_tv)
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
else
dpll |= PLL_REF_INPUT_DREFCLK;
 
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
POSTING_READ(DPLL(pipe));
udelay(150);
 
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
intel_update_lvds(crtc, clock, adjusted_mode);
 
I915_WRITE(DPLL(pipe), dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(DPLL(pipe));
udelay(150);
 
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(DPLL(pipe), dpll);
}
 
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dspcntr, pipeconf, vsyncshift;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *encoder;
const intel_limit_t *limit;
int ret;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
}
 
num_connectors++;
}
 
refclk = i9xx_get_refclk(crtc, num_connectors);
 
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
&clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
 
/* Ensure that the cursor is valid for the new mode before changing... */
// intel_crtc_update_cursor(crtc, true);
 
if (is_lvds && dev_priv->lvds_downclock_avail) {
/*
* Ensure we match the reduced clock's P to the target clock.
* If the clocks don't match, we can't switch the display clock
* by using the FP0/FP1. In such case we will disable the LVDS
* downclock feature.
*/
has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
&clock,
&reduced_clock);
}
 
if (is_sdvo && is_tv)
i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
 
i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
&reduced_clock : NULL);
 
if (IS_GEN2(dev))
i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
else if (IS_VALLEYVIEW(dev))
vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL,
refclk, num_connectors);
else
i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
 
/* setup pipeconf */
pipeconf = I915_READ(PIPECONF(pipe));
 
5122,8 → 4398,6
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
 
/* Ironlake's plane is forced to pipe, bit 24 is to
enable color space conversion */
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
5146,7 → 4420,7
/* default to 8bpc */
pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
if (is_dp) {
if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
pipeconf |= PIPECONF_BPP_6 |
PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
5153,109 → 4427,14
}
}
 
dpll |= DPLL_VCO_ENABLE;
 
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
 
I915_WRITE(FP0(pipe), fp);
I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
 
POSTING_READ(DPLL(pipe));
udelay(150);
 
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
if (is_lvds) {
temp = I915_READ(LVDS);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (pipe == 1) {
temp |= LVDS_PIPEB_SELECT;
} else {
temp &= ~LVDS_PIPEB_SELECT;
}
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
if (clock.p2 == 7)
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
 
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
/* set the dithering flag on LVDS as needed */
if (INTEL_INFO(dev)->gen >= 4) {
if (dev_priv->lvds_dither)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
lvds_sync |= LVDS_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
lvds_sync |= LVDS_VSYNC_POLARITY;
if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
!= lvds_sync) {
char flags[2] = "-+";
DRM_INFO("Changing LVDS panel from "
"(%chsync, %cvsync) to (%chsync, %cvsync)\n",
flags[!(temp & LVDS_HSYNC_POLARITY)],
flags[!(temp & LVDS_VSYNC_POLARITY)],
flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
temp |= lvds_sync;
}
I915_WRITE(LVDS, temp);
}
 
if (is_dp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
}
 
I915_WRITE(DPLL(pipe), dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(DPLL(pipe));
udelay(150);
 
if (INTEL_INFO(dev)->gen >= 4) {
temp = 0;
if (is_sdvo) {
temp = intel_mode_get_pixel_multiplier(adjusted_mode);
if (temp > 1)
temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
else
temp = 0;
}
I915_WRITE(DPLL_MD(pipe), temp);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(DPLL(pipe), dpll);
}
 
intel_crtc->lowfreq_avail = false;
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(FP1(pipe), fp2);
intel_crtc->lowfreq_avail = true;
if (HAS_PIPE_CXSR(dev)) {
if (intel_crtc->lowfreq_avail) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
}
} else {
I915_WRITE(FP1(pipe), fp);
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
}
5262,18 → 4441,22
}
 
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
if (!IS_GEN2(dev) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vdisplay -= 1;
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_start -= 1;
adjusted_mode->crtc_vblank_end -= 1;
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
vsyncshift = adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal/2;
} else {
pipeconf |= PIPECONF_PROGRESSIVE;
vsyncshift = 0;
}
 
if (!IS_GEN3(dev))
I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
 
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
5312,9 → 4495,8
 
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
intel_enable_plane(dev_priv, plane, pipe);
 
ret = intel_pipe_set_base(crtc, x, y, old_fb);
ret = intel_pipe_set_base(crtc, x, y, fb);
 
intel_update_watermarks(dev);
 
5389,7 → 4571,8
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
DRM_DEBUG_KMS("Using SSC on panel\n");
temp |= DREF_SSC1_ENABLE;
}
} else
temp &= ~DREF_SSC1_ENABLE;
 
/* Get SSC going before enabling the outputs */
I915_WRITE(PCH_DREF_CONTROL, temp);
5442,15 → 4625,11
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *edp_encoder = NULL;
int num_connectors = 0;
bool is_lvds = false;
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
continue;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
5471,11 → 4650,118
return 120000;
}
 
static void ironlake_set_pipeconf(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
bool dither)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
uint32_t val;
 
val = I915_READ(PIPECONF(pipe));
 
val &= ~PIPE_BPC_MASK;
switch (intel_crtc->bpp) {
case 18:
val |= PIPE_6BPC;
break;
case 24:
val |= PIPE_8BPC;
break;
case 30:
val |= PIPE_10BPC;
break;
case 36:
val |= PIPE_12BPC;
break;
default:
val |= PIPE_8BPC;
break;
}
 
val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
if (dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
val &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
 
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
 
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock,
bool *has_reduced_clock,
intel_clock_t *reduced_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
int refclk;
const intel_limit_t *limit;
bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (intel_encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
}
}
 
refclk = ironlake_get_refclk(crtc);
 
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
clock);
if (!ret)
return false;
 
if (is_lvds && dev_priv->lvds_downclock_avail) {
/*
* Ensure we match the reduced clock's P to the target clock.
* If the clocks don't match, we can't switch the display clock
* by using the FP0/FP1. In such case we will disable the LVDS
* downclock feature.
*/
*has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
clock,
reduced_clock);
}
 
if (is_sdvo && is_tv)
i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
 
return true;
}
 
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
5482,29 → 4768,21
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
int num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
u32 dpll, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *has_edp_encoder = NULL;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
const intel_limit_t *limit;
struct intel_encoder *encoder, *edp_encoder = NULL;
int ret;
struct fdi_m_n m_n = {0};
u32 temp;
u32 lvds_sync = 0;
int target_clock, pixel_multiplier, lane, link_bw, factor;
unsigned int pipe_bpp;
bool dither;
bool is_cpu_edp = false, is_pch_edp = false;
 
ENTER();
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
continue;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
5525,7 → 4803,12
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
has_edp_encoder = encoder;
is_dp = true;
if (intel_encoder_is_pch_edp(&encoder->base))
is_pch_edp = true;
else
is_cpu_edp = true;
edp_encoder = encoder;
break;
}
 
5532,15 → 4815,8
num_connectors++;
}
 
refclk = ironlake_get_refclk(crtc);
 
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
&has_reduced_clock, &reduced_clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
5549,61 → 4825,14
/* Ensure that the cursor is valid for the new mode before changing... */
// intel_crtc_update_cursor(crtc, true);
 
if (is_lvds && dev_priv->lvds_downclock_avail) {
has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
refclk,
&reduced_clock);
if (has_reduced_clock && (clock.p != reduced_clock.p)) {
/*
* If the different P is found, it means that we can't
* switch the display clock by using the FP0/FP1.
* In such case we will disable the LVDS downclock
* feature.
*/
DRM_DEBUG_KMS("Different P is found for "
"LVDS clock/downclock\n");
has_reduced_clock = 0;
}
}
/* SDVO TV has fixed PLL values that depend on its clock range;
this mirrors the VBIOS setting. */
if (is_sdvo && is_tv) {
if (adjusted_mode->clock >= 100000
&& adjusted_mode->clock < 140500) {
clock.p1 = 2;
clock.p2 = 10;
clock.n = 3;
clock.m1 = 16;
clock.m2 = 8;
} else if (adjusted_mode->clock >= 140500
&& adjusted_mode->clock <= 200000) {
clock.p1 = 1;
clock.p2 = 10;
clock.n = 6;
clock.m1 = 12;
clock.m2 = 8;
}
}
 
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
lane = 0;
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
if (has_edp_encoder &&
!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
target_clock = mode->clock;
intel_edp_link_config(has_edp_encoder,
&lane, &link_bw);
if (is_cpu_edp) {
intel_edp_link_config(edp_encoder, &lane, &link_bw);
} else {
/* [e]DP over FDI requires target mode clock
instead of link clock */
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
 
/* FDI is a binary signal running at ~2.7GHz, encoding
* each output octet as 10 bits. The actual frequency
* is stored as a divider into a 100MHz clock, and the
5614,33 → 4843,27
link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
}
 
/* [e]DP over FDI requires target mode clock instead of link clock. */
if (edp_encoder)
target_clock = intel_edp_target_clock(edp_encoder, mode);
else if (is_dp)
target_clock = mode->clock;
else
target_clock = adjusted_mode->clock;
 
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
switch (pipe_bpp) {
case 18:
temp |= PIPE_6BPC;
break;
case 24:
temp |= PIPE_8BPC;
break;
case 30:
temp |= PIPE_10BPC;
break;
case 36:
temp |= PIPE_12BPC;
break;
default:
dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp,
adjusted_mode);
if (is_lvds && dev_priv->lvds_dither)
dither = true;
 
if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
pipe_bpp != 36) {
WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
pipe_bpp);
temp |= PIPE_8BPC;
pipe_bpp = 24;
break;
}
 
intel_crtc->bpp = pipe_bpp;
I915_WRITE(PIPECONF(pipe), temp);
 
if (!lane) {
/*
5690,7 → 4913,7
}
dpll |= DPLL_DVO_HIGH_SPEED;
}
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
if (is_dp && !is_cpu_edp)
dpll |= DPLL_DVO_HIGH_SPEED;
 
/* compute bitmask from p1 value */
5724,39 → 4947,25
else
dpll |= PLL_REF_INPUT_DREFCLK;
 
/* setup pipeconf */
pipeconf = I915_READ(PIPECONF(pipe));
 
/* Set up the display plane register */
dspcntr = DISPPLANE_GAMMA_ENABLE;
 
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
 
/* PCH eDP needs FDI, but CPU eDP does not */
if (!intel_crtc->no_pll) {
if (!has_edp_encoder ||
intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
I915_WRITE(PCH_FP0(pipe), fp);
I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
/* CPU eDP is the only output that doesn't need a PCH PLL of its own on
* pre-Haswell/LPT generation */
if (HAS_PCH_LPT(dev)) {
DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
pipe);
} else if (!is_cpu_edp) {
struct intel_pch_pll *pll;
 
POSTING_READ(PCH_DPLL(pipe));
udelay(150);
}
} else {
if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
fp == I915_READ(PCH_FP0(0))) {
intel_crtc->use_pll_a = true;
DRM_DEBUG_KMS("using pipe a dpll\n");
} else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
fp == I915_READ(PCH_FP0(1))) {
intel_crtc->use_pll_a = false;
DRM_DEBUG_KMS("using pipe b dpll\n");
} else {
DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
pll = intel_get_pch_pll(intel_crtc, dpll, fp);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
pipe);
return -EINVAL;
}
}
} else
intel_put_pch_pll(intel_crtc);
 
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
5789,32 → 4998,15
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
lvds_sync |= LVDS_HSYNC_POLARITY;
temp |= LVDS_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
lvds_sync |= LVDS_VSYNC_POLARITY;
if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
!= lvds_sync) {
char flags[2] = "-+";
DRM_INFO("Changing LVDS panel from "
"(%chsync, %cvsync) to (%chsync, %cvsync)\n",
flags[!(temp & LVDS_HSYNC_POLARITY)],
flags[!(temp & LVDS_VSYNC_POLARITY)],
flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
temp |= lvds_sync;
}
temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(PCH_LVDS, temp);
}
 
pipeconf &= ~PIPECONF_DITHER_EN;
pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
if ((is_lvds && dev_priv->lvds_dither) || dither) {
pipeconf |= PIPECONF_DITHER_EN;
pipeconf |= PIPECONF_DITHER_TYPE_SP;
}
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
if (is_dp && !is_cpu_edp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
/* For non-DP output, clear any trans DP clock recovery setting.*/
5824,13 → 5016,11
I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
 
if (!intel_crtc->no_pll &&
(!has_edp_encoder ||
intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
I915_WRITE(PCH_DPLL(pipe), dpll);
if (intel_crtc->pch_pll) {
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(PCH_DPLL(pipe));
POSTING_READ(intel_crtc->pch_pll->pll_reg);
udelay(150);
 
/* The pixel multiplier can only be updated once the
5838,39 → 5028,29
*
* So write it again.
*/
I915_WRITE(PCH_DPLL(pipe), dpll);
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
}
 
intel_crtc->lowfreq_avail = false;
if (!intel_crtc->no_pll) {
if (intel_crtc->pch_pll) {
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(PCH_FP1(pipe), fp2);
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
intel_crtc->lowfreq_avail = true;
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("enabling CxSR downclocking\n");
pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
}
} else {
I915_WRITE(PCH_FP1(pipe), fp);
if (HAS_PIPE_CXSR(dev)) {
DRM_DEBUG_KMS("disabling CxSR downclocking\n");
pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
}
}
}
 
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vdisplay -= 1;
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_start -= 1;
adjusted_mode->crtc_vblank_end -= 1;
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
pipeconf |= PIPECONF_PROGRESSIVE;
I915_WRITE(VSYNCSHIFT(pipe),
adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal/2);
} else {
I915_WRITE(VSYNCSHIFT(pipe), 0);
}
 
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
5903,32 → 5083,22
I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
 
if (has_edp_encoder &&
!intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}
 
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
ironlake_set_pipeconf(crtc, adjusted_mode, dither);
 
intel_wait_for_vblank(dev, pipe);
 
if (IS_GEN5(dev)) {
/* enable address swizzle for tiling buffer */
temp = I915_READ(DISP_ARB_CTL);
I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
}
 
I915_WRITE(DSPCNTR(plane), dspcntr);
/* Set up the display plane register */
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
 
ret = intel_pipe_set_base(crtc, x, y, old_fb);
ret = intel_pipe_set_base(crtc, x, y, fb);
 
dbgprintf("Set base\n");
 
intel_update_watermarks(dev);
 
LEAVE();
intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
 
return ret;
}
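 
/*
 * The FDI comment in the function above relies on 8b/10b coding: each
 * payload octet occupies 10 bits on the wire, so the payload byte
 * rate is the raw bit rate divided by 10. A one-line illustration:
 */
static long example_fdi_payload_bytes(long raw_bps)
{
	return raw_bps / 10; /* e.g. 2.7 Gbit/s raw -> 270 Mbyte/s payload */
}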
5937,7 → 5107,7
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *old_fb)
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
5945,17 → 5115,12
int pipe = intel_crtc->pipe;
int ret;
 
// drm_vblank_pre_modeset(dev, pipe);
ENTER();
drm_vblank_pre_modeset(dev, pipe);
 
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
x, y, old_fb);
x, y, fb);
drm_vblank_post_modeset(dev, pipe);
 
// drm_vblank_post_modeset(dev, pipe);
 
intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
LEAVE();
 
return ret;
}
 
6028,6 → 5193,91
I915_WRITE(G4X_AUD_CNTL_ST, i);
}
 
static void haswell_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
struct drm_device *dev = crtc->dev;
uint32_t eldv;
uint32_t i;
int len;
int pipe = to_intel_crtc(crtc)->pipe;
int tmp;
 
int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
int aud_config = HSW_AUD_CFG(pipe);
int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
 
 
DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
 
/* Audio output enable */
DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
tmp = I915_READ(aud_cntrl_st2);
tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
I915_WRITE(aud_cntrl_st2, tmp);
 
/* Wait for 1 vertical blank */
intel_wait_for_vblank(dev, pipe);
 
/* Set ELD valid state */
tmp = I915_READ(aud_cntrl_st2);
DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
I915_WRITE(aud_cntrl_st2, tmp);
tmp = I915_READ(aud_cntrl_st2);
DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
 
/* Enable HDMI mode */
tmp = I915_READ(aud_config);
DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
/* clear N_programming_enable and N_value_index */
tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
I915_WRITE(aud_config, tmp);
 
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
 
eldv = AUDIO_ELD_VALID_A << (pipe * 4);
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
} else
I915_WRITE(aud_config, 0);
 
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
aud_cntl_st, IBX_ELD_ADDRESS,
hdmiw_hdmiedid))
return;
 
i = I915_READ(aud_cntrl_st2);
i &= ~eldv;
I915_WRITE(aud_cntrl_st2, i);
 
if (!eld[0])
return;
 
i = I915_READ(aud_cntl_st);
i &= ~IBX_ELD_ADDRESS;
I915_WRITE(aud_cntl_st, i);
i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
DRM_DEBUG_DRIVER("port num:%d\n", i);
 
len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
DRM_DEBUG_DRIVER("ELD size %d\n", len);
for (i = 0; i < len; i++)
I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
 
i = I915_READ(aud_cntrl_st2);
i |= eldv;
I915_WRITE(aud_cntrl_st2, i);
 
}
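 
/*
 * Sketch of the ELD copy loop used above: eld[2] carries the baseline
 * block length in dwords, clamped to the 84-byte (21-dword) hardware
 * buffer. The hw_write() callback is an assumption standing in for
 * the I915_WRITE() to the EDID data register.
 */
#include <stdint.h>
 
static void example_write_eld(const uint8_t *eld,
			      void (*hw_write)(uint32_t dword))
{
	int len = eld[2] < 21 ? eld[2] : 21; /* 84 bytes of hw buffer */
	int i;
 
	for (i = 0; i < len; i++)
		hw_write(((const uint32_t *)eld)[i]);
}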
 
static void ironlake_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
{
6037,27 → 5287,27
uint32_t i;
int len;
int hdmiw_hdmiedid;
int aud_config;
int aud_cntl_st;
int aud_cntrl_st2;
int pipe = to_intel_crtc(crtc)->pipe;
 
if (HAS_PCH_IBX(connector->dev)) {
hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
aud_cntl_st = IBX_AUD_CNTL_ST_A;
hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
aud_config = IBX_AUD_CFG(pipe);
aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else {
hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
aud_cntl_st = CPT_AUD_CNTL_ST_A;
hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
aud_config = CPT_AUD_CFG(pipe);
aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
 
i = to_intel_crtc(crtc)->pipe;
hdmiw_hdmiedid += i * 0x100;
aud_cntl_st += i * 0x100;
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
 
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
 
i = I915_READ(aud_cntl_st);
i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */
i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
if (!i) {
DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
/* operate blindly on all ports */
6072,7 → 5322,9
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
}
I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
} else
I915_WRITE(aud_config, 0);
 
if (intel_eld_uptodate(connector,
aud_cntrl_st2, eldv,
6135,7 → 5387,7
int i;
 
/* The clocks have to be on to load the palette. */
if (!crtc->enabled)
if (!crtc->enabled || !intel_crtc->active)
return;
 
/* use legacy palette for Ironlake */
6150,42 → 5402,263
}
}
 
#if 0
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool visible = base != 0;
u32 cntl;
 
if (intel_crtc->cursor_visible == visible)
return;
 
cntl = I915_READ(_CURACNTR);
if (visible) {
/* On these chipsets we can only modify the base whilst
* the cursor is disabled.
*/
I915_WRITE(_CURABASE, base);
 
cntl &= ~(CURSOR_FORMAT_MASK);
/* XXX width must be 64, stride 256 => 0x00 << 28 */
cntl |= CURSOR_ENABLE |
CURSOR_GAMMA_ENABLE |
CURSOR_FORMAT_ARGB;
} else
cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
I915_WRITE(_CURACNTR, cntl);
 
intel_crtc->cursor_visible = visible;
}
 
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
bool visible = base != 0;
 
if (intel_crtc->cursor_visible != visible) {
uint32_t cntl = I915_READ(CURCNTR(pipe));
if (base) {
cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
cntl |= pipe << 28; /* Connect to correct pipe */
} else {
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
I915_WRITE(CURCNTR(pipe), cntl);
 
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
I915_WRITE(CURBASE(pipe), base);
}
 
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
bool visible = base != 0;
 
if (intel_crtc->cursor_visible != visible) {
uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
if (base) {
cntl &= ~CURSOR_MODE;
cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
} else {
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
I915_WRITE(CURCNTR_IVB(pipe), cntl);
 
intel_crtc->cursor_visible = visible;
}
/* and commit changes on next vblank */
I915_WRITE(CURBASE_IVB(pipe), base);
}
 
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
bool on)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int x = intel_crtc->cursor_x;
int y = intel_crtc->cursor_y;
u32 base, pos;
bool visible;
 
pos = 0;
 
if (on && crtc->enabled && crtc->fb) {
base = intel_crtc->cursor_addr;
if (x > (int) crtc->fb->width)
base = 0;
 
if (y > (int) crtc->fb->height)
base = 0;
} else
base = 0;
 
if (x < 0) {
if (x + intel_crtc->cursor_width < 0)
base = 0;
 
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
x = -x;
}
pos |= x << CURSOR_X_SHIFT;
 
if (y < 0) {
if (y + intel_crtc->cursor_height < 0)
base = 0;
 
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
y = -y;
}
pos |= y << CURSOR_Y_SHIFT;
 
visible = base != 0;
if (!visible && !intel_crtc->cursor_visible)
return;
 
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
I915_WRITE(CURPOS_IVB(pipe), pos);
ivb_update_cursor(crtc, base);
} else {
I915_WRITE(CURPOS(pipe), pos);
if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
else
i9xx_update_cursor(crtc, base);
}
}
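 
/*
 * Sketch of the sign-magnitude CURPOS packing built above: each axis
 * stores its magnitude in its field plus a per-axis sign bit. The
 * shift and sign values are restated here as assumptions.
 */
#include <stdint.h>
 
#define EXAMPLE_POS_SIGN 0x8000 /* assumed CURSOR_POS_SIGN */
#define EXAMPLE_X_SHIFT  0      /* assumed CURSOR_X_SHIFT */
#define EXAMPLE_Y_SHIFT  16     /* assumed CURSOR_Y_SHIFT */
 
static uint32_t example_pack_curpos(int x, int y)
{
	uint32_t pos = 0;
 
	if (x < 0) {
		pos |= EXAMPLE_POS_SIGN << EXAMPLE_X_SHIFT;
		x = -x;
	}
	pos |= (uint32_t)x << EXAMPLE_X_SHIFT;
 
	if (y < 0) {
		pos |= EXAMPLE_POS_SIGN << EXAMPLE_Y_SHIFT;
		y = -y;
	}
	pos |= (uint32_t)y << EXAMPLE_Y_SHIFT;
 
	return pos; /* e.g. example_pack_curpos(-10, 20) == 0x0014800a */
}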
 
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file,
uint32_t handle,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj;
uint32_t addr;
int ret;
 
/* if we want to turn off the cursor ignore width and height */
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
obj = NULL;
mutex_lock(&dev->struct_mutex);
goto finish;
}
 
/* Currently we only support 64x64 cursors */
if (width != 64 || height != 64) {
DRM_ERROR("we currently only support 64x64 cursors\n");
return -EINVAL;
}
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
if (&obj->base == NULL)
return -ENOENT;
 
if (obj->base.size < width * height * 4) {
DRM_ERROR("buffer is to small\n");
ret = -ENOMEM;
goto fail;
}
 
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!dev_priv->info->cursor_needs_physical) {
if (obj->tiling_mode) {
DRM_ERROR("cursor cannot be tiled\n");
ret = -EINVAL;
goto fail_locked;
}
 
ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
goto fail_locked;
}
 
ret = i915_gem_object_put_fence(obj);
if (ret) {
DRM_ERROR("failed to release fence for cursor");
goto fail_unpin;
}
 
addr = obj->gtt_offset;
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
goto fail_locked;
}
addr = obj->phys_obj->handle->busaddr;
}
 
if (IS_GEN2(dev))
I915_WRITE(CURSIZE, (height << 12) | width);
 
finish:
if (intel_crtc->cursor_bo) {
if (dev_priv->info->cursor_needs_physical) {
if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
i915_gem_object_unpin(intel_crtc->cursor_bo);
drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
 
mutex_unlock(&dev->struct_mutex);
 
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = obj;
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
 
// intel_crtc_update_cursor(crtc, true);
 
return 0;
fail_unpin:
i915_gem_object_unpin(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:
drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
 
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
intel_crtc->cursor_x = x;
intel_crtc->cursor_y = y;
 
// intel_crtc_update_cursor(crtc, true);
 
return 0;
}
#endif
 
/** Sets the color ramps on behalf of RandR */
void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
6242,9 → 5715,29
704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
 
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
struct intel_framebuffer *intel_fb;
int ret;
 
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
drm_gem_object_unreference_unlocked(&obj->base);
return ERR_PTR(-ENOMEM);
}
 
ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
if (ret) {
drm_gem_object_unreference_unlocked(&obj->base);
kfree(intel_fb);
return ERR_PTR(ret);
}
 
return &intel_fb->base;
}
 
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
6295,30 → 5788,31
 
// obj = dev_priv->fbdev->ifb.obj;
// if (obj == NULL)
// return NULL;
 
// fb = &dev_priv->fbdev->ifb.base;
// if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
// fb->bits_per_pixel))
return NULL;
 
// if (obj->base.size < mode->vdisplay * fb->pitch)
if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
fb->bits_per_pixel))
// return NULL;
 
if (obj->base.size < mode->vdisplay * fb->pitches[0])
return NULL;
 
// return fb;
}
 
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old)
{
struct intel_crtc *intel_crtc;
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_crtc *possible_crtc;
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = NULL;
struct drm_device *dev = encoder->dev;
struct drm_framebuffer *old_fb;
struct drm_framebuffer *fb;
int i = -1;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6339,22 → 5833,13
if (encoder->crtc) {
crtc = encoder->crtc;
 
intel_crtc = to_intel_crtc(crtc);
old->dpms_mode = intel_crtc->dpms_mode;
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
 
/* Make sure the crtc and connector are running */
if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc_helper_funcs *crtc_funcs;
if (connector->dpms != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
 
crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
 
encoder_funcs = encoder->helper_private;
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
 
return true;
}
 
6377,11 → 5862,11
return false;
}
 
encoder->crtc = crtc;
connector->encoder = encoder;
intel_encoder->new_crtc = to_intel_crtc(crtc);
to_intel_connector(connector)->new_encoder = intel_encoder;
 
intel_crtc = to_intel_crtc(crtc);
old->dpms_mode = intel_crtc->dpms_mode;
old->dpms_mode = connector->dpms;
old->load_detect_temp = true;
old->release_fb = NULL;
 
6388,8 → 5873,6
if (!mode)
mode = &load_detect_mode;
 
old_fb = crtc->fb;
 
/* We need a framebuffer large enough to accommodate all accesses
* that the plane may generate whilst we perform load detection.
* We can not rely on the fbcon either being present (we get called
6397,25 → 5880,23
* not even exist) or that it is large enough to satisfy the
* requested mode.
*/
crtc->fb = mode_fits_in_fbdev(dev, mode);
if (crtc->fb == NULL) {
fb = mode_fits_in_fbdev(dev, mode);
if (fb == NULL) {
DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
old->release_fb = crtc->fb;
fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
old->release_fb = fb;
} else
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(crtc->fb)) {
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
crtc->fb = old_fb;
return false;
goto fail;
}
 
if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
crtc->fb = old_fb;
return false;
goto fail;
}
 
/* let the connector get through one full cycle before testing */
6422,17 → 5903,18
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
return true;
fail:
connector->encoder = NULL;
encoder->crtc = NULL;
return false;
}
 
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old)
{
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
6439,9 → 5921,12
encoder->base.id, drm_get_encoder_name(encoder));
 
if (old->load_detect_temp) {
connector->encoder = NULL;
drm_helper_disable_unused_functions(dev);
struct drm_crtc *crtc = encoder->crtc;
 
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
intel_set_mode(crtc, NULL, 0, 0, NULL);
 
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
 
6449,11 → 5934,9
}
 
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON) {
encoder_funcs->dpms(encoder, old->dpms_mode);
crtc_funcs->dpms(crtc, old->dpms_mode);
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
}
}
 
/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6570,21 → 6053,10
mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
 
drm_mode_set_name(mode);
drm_mode_set_crtcinfo(mode, 0);
 
return mode;
}
 
#define GPU_IDLE_TIMEOUT 500 /* ms */
 
 
 
 
#define CRTC_IDLE_TIMEOUT 1000 /* ms */
 
 
 
 
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
6594,8 → 6066,6
int dpll_reg = DPLL(pipe);
int dpll;
 
ENTER();
 
if (HAS_PCH_SPLIT(dev))
return;
 
6606,9 → 6076,7
if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
DRM_DEBUG_DRIVER("upclocking LVDS\n");
 
/* Unlock panel regs */
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
assert_panel_unlocked(dev_priv, pipe);
 
dpll &= ~DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
6617,37 → 6085,88
dpll = I915_READ(dpll_reg);
if (dpll & DISPLAY_RATE_SELECT_FPA1)
DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
 
/* ...and lock them again */
I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
}
 
LEAVE();
 
/* Schedule downclock */
}
 
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
if (HAS_PCH_SPLIT(dev))
return;
 
if (!dev_priv->lvds_downclock_avail)
return;
 
/*
* Since this is called by a timer, we should never get here in
* the manual case.
*/
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
int dpll;
 
DRM_DEBUG_DRIVER("downclocking LVDS\n");
 
assert_panel_unlocked(dev_priv, pipe);
 
dpll = I915_READ(dpll_reg);
dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll);
intel_wait_for_vblank(dev, pipe);
dpll = I915_READ(dpll_reg);
if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
}
 
}
 
void intel_mark_busy(struct drm_device *dev)
{
i915_update_gfx_val(dev->dev_private);
}
 
void intel_mark_idle(struct drm_device *dev)
{
}
 
void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
 
if (!i915_powersave)
return;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
continue;
 
if (to_intel_framebuffer(crtc->fb)->obj == obj)
intel_increase_pllclock(crtc);
}
}
 
void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
 
if (!i915_powersave)
return;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
continue;
 
if (to_intel_framebuffer(crtc->fb)->obj == obj)
intel_decrease_pllclock(crtc);
}
}
 
 
 
 
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6670,1176 → 6189,1570
kfree(intel_crtc);
}
 
#if 0
static void intel_unpin_work_fn(struct work_struct *__work)
{
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
 
mutex_lock(&work->dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
 
intel_update_fbc(work->dev);
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
 
static void do_intel_finish_page_flip(struct drm_device *dev,
struct drm_crtc *crtc)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj;
struct drm_pending_vblank_event *e;
struct timeval tvbl;
unsigned long flags;
 
/* Ignore early vblank irqs */
if (intel_crtc == NULL)
return;
 
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
 
intel_crtc->unpin_work = NULL;
 
if (work->event) {
e = work->event;
e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
 
e->event.tv_sec = tvbl.tv_sec;
e->event.tv_usec = tvbl.tv_usec;
 
list_add_tail(&e->base.link,
&e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
}
 
drm_vblank_put(dev, intel_crtc->pipe);
 
spin_unlock_irqrestore(&dev->event_lock, flags);
 
obj = work->old_fb_obj;
 
atomic_clear_mask(1 << intel_crtc->plane,
&obj->pending_flip.counter);
 
wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
 
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
 
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 
do_intel_finish_page_flip(dev, crtc);
}
 
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
 
do_intel_finish_page_flip(dev, crtc);
}
 
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
 
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
if ((++intel_crtc->unpin_work->pending) > 1)
DRM_ERROR("Prepared flip multiple times\n");
} else {
DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
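/* Illustrative note, added for clarity (not in the original source):
 * page flips are two-phase. intel_crtc_page_flip() installs
 * unpin_work and queue_flip() emits MI_DISPLAY_FLIP;
 * intel_prepare_page_flip() -- presumably called from the
 * flip-pending interrupt path -- marks the work pending; and
 * do_intel_finish_page_flip() then delivers the vblank event and
 * schedules the unpin work. The pending counter above is how a
 * repeated "prepare" without a "finish" is detected.
 */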
 
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
 
ret = intel_ring_begin(ring, 6);
if (ret)
goto err_unpin;
 
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
*/
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_ring_advance(ring);
return 0;
 
err_unpin:
intel_unpin_fb_obj(obj);
err:
return ret;
}
 
static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
u32 flip_mask;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
 
ret = intel_ring_begin(ring, 6);
if (ret)
goto err_unpin;
 
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
 
intel_ring_advance(ring);
return 0;
 
err_unpin:
intel_unpin_fb_obj(obj);
err:
return ret;
}
 
static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pf, pipesrc;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
 
ret = intel_ring_begin(ring, 4);
if (ret)
goto err_unpin;
 
/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
(obj->gtt_offset + intel_crtc->dspaddr_offset) |
obj->tiling_mode);
 
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
* pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
intel_ring_advance(ring);
return 0;
 
err_unpin:
intel_unpin_fb_obj(obj);
err:
return ret;
}
 
static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
uint32_t pf, pipesrc;
int ret;
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
 
ret = intel_ring_begin(ring, 4);
if (ret)
goto err_unpin;
 
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
* flipping with a non-native mode, and worse causes a normal
* modeset to fail.
* pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
intel_ring_advance(ring);
return 0;
 
err_unpin:
intel_unpin_fb_obj(obj);
err:
return ret;
}
 
/*
* On gen7 we currently use the blit ring because (in early silicon at least)
* the render ring doesn't give us interrupts for page flip completion, which
* means clients will hang after the first flip is queued. Fortunately the
* blit ring generates interrupts properly, so use it instead.
*/
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
uint32_t plane_bit = 0;
int ret;
 
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
 
switch(intel_crtc->plane) {
case PLANE_A:
plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
break;
case PLANE_B:
plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
break;
case PLANE_C:
plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
break;
default:
WARN_ONCE(1, "unknown plane in flip command\n");
ret = -ENODEV;
goto err_unpin;
}
 
ret = intel_ring_begin(ring, 4);
if (ret)
goto err_unpin;
 
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
intel_ring_advance(ring);
return 0;
 
err_unpin:
intel_unpin_fb_obj(obj);
err:
return ret;
}
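/* Summary, inferred from the queue_flip variants above: gen2/3 flip
 * on the render ring and must emit MI_WAIT_FOR_EVENT first, gen4 and
 * gen6 emit MI_DISPLAY_FLIP on the render ring directly, and gen7
 * moves to the blit ring (BCS) because early silicon lacks reliable
 * render-ring flip-completion interrupts.
 */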
 
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj)
{
return -ENODEV;
}
 
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
int ret;
 
/* Can't change pixel format via MI display flips. */
if (fb->pixel_format != crtc->fb->pixel_format)
return -EINVAL;
 
/*
* TILEOFF/LINOFF registers can't be changed via MI display flips.
* Note that pitch changes could also affect these registers.
*/
if (INTEL_INFO(dev)->gen > 3 &&
(fb->offsets[0] != crtc->fb->offsets[0] ||
fb->pitches[0] != crtc->fb->pitches[0]))
return -EINVAL;
 
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
 
work->event = event;
work->dev = crtc->dev;
intel_fb = to_intel_framebuffer(crtc->fb);
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
 
ret = drm_vblank_get(dev, intel_crtc->pipe);
if (ret)
goto free_work;
 
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
drm_vblank_put(dev, intel_crtc->pipe);
 
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
}
intel_crtc->unpin_work = work;
spin_unlock_irqrestore(&dev->event_lock, flags);
 
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto cleanup;
 
/* Reference the objects for the scheduled work. */
drm_gem_object_reference(&work->old_fb_obj->base);
drm_gem_object_reference(&obj->base);
 
crtc->fb = fb;
 
work->pending_flip_obj = obj;
 
static void intel_sanitize_modesetting(struct drm_device *dev,
int pipe, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg, val;
work->enable_stall_check = true;
 
if (HAS_PCH_SPLIT(dev))
return;
 
/* Who knows what state these registers were left in by the BIOS or
* grub?
*
* If we leave the registers in a conflicting state (e.g. with the
* display plane reading from the other pipe than the one we intend
* to use) then when we attempt to teardown the active mode, we will
* not disable the pipes and planes in the correct order -- leaving
* a plane reading from a disabled pipe and possibly leading to
* undefined behaviour.
/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 
reg = DSPCNTR(plane);
val = I915_READ(reg);
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
goto cleanup_pending;
 
if ((val & DISPLAY_PLANE_ENABLE) == 0)
return;
if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
return;
intel_disable_fbc(dev);
intel_mark_fb_busy(obj);
mutex_unlock(&dev->struct_mutex);
 
/* This display plane is active and attached to the other CPU pipe. */
pipe = !pipe;
trace_i915_flip_request(intel_crtc->plane, obj);
 
/* Disable the plane and wait for it to stop reading from the pipe. */
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
}
return 0;
 
static void intel_crtc_reset(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
cleanup_pending:
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
 
/* Reset flags back to the 'unknown' status so that they
* will be correctly set on the initial modeset.
*/
intel_crtc->dpms_mode = -1;
cleanup:
spin_lock_irqsave(&dev->event_lock, flags);
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
 
/* We need to fix up any BIOS configuration that conflicts with
* our expectations.
*/
intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
drm_vblank_put(dev, intel_crtc->pipe);
free_work:
kfree(work);
 
return ret;
}
 
#endif
 
static struct drm_crtc_helper_funcs intel_helper_funcs = {
.dpms = intel_crtc_dpms,
.mode_fixup = intel_crtc_mode_fixup,
.mode_set = intel_crtc_mode_set,
.mode_set_base = intel_pipe_set_base,
.mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
.disable = intel_crtc_disable,
.disable = intel_crtc_noop,
};
 
static const struct drm_crtc_funcs intel_crtc_funcs = {
.reset = intel_crtc_reset,
// .cursor_set = intel_crtc_cursor_set,
// .cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = intel_crtc_destroy,
// .page_flip = intel_crtc_page_flip,
};
 
static void intel_crtc_init(struct drm_device *dev, int pipe)
bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
int i;
struct intel_encoder *other_encoder;
struct drm_crtc *crtc = &encoder->new_crtc->base;
 
intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
if (intel_crtc == NULL)
return;
if (WARN_ON(!crtc))
return false;
 
drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
list_for_each_entry(other_encoder,
&crtc->dev->mode_config.encoder_list,
base.head) {
 
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
for (i = 0; i < 256; i++) {
intel_crtc->lut_r[i] = i;
intel_crtc->lut_g[i] = i;
intel_crtc->lut_b[i] = i;
if (&other_encoder->new_crtc->base != crtc ||
encoder == other_encoder)
continue;
else
return true;
}
 
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
return false;
}
 
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
struct drm_crtc *crtc)
{
struct drm_device *dev;
struct drm_crtc *tmp;
int crtc_mask = 1;
 
intel_crtc_reset(&intel_crtc->base);
intel_crtc->active = true; /* force the pipe off on setup_init_config */
intel_crtc->bpp = 24; /* default for pre-Ironlake */
WARN(!crtc, "checking null crtc?\n");
 
if (HAS_PCH_SPLIT(dev)) {
if (pipe == 2 && IS_IVYBRIDGE(dev))
intel_crtc->no_pll = true;
intel_helper_funcs.prepare = ironlake_crtc_prepare;
intel_helper_funcs.commit = ironlake_crtc_commit;
} else {
intel_helper_funcs.prepare = i9xx_crtc_prepare;
intel_helper_funcs.commit = i9xx_crtc_commit;
dev = crtc->dev;
 
list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
if (tmp == crtc)
break;
crtc_mask <<= 1;
}
 
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
if (encoder->possible_crtcs & crtc_mask)
return true;
return false;
}
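/* Worked example (illustrative): with crtcs [A, B, C] on
 * mode_config.crtc_list, the loop above yields crtc_mask = 0x2 for
 * crtc B. An encoder with possible_crtcs = 0x6 (B or C) is therefore
 * accepted for B, while possible_crtcs = 0x1 (A only) is rejected.
 */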
 
intel_crtc->busy = false;
/**
* intel_modeset_update_staged_output_state
*
* Updates the staged output configuration state, e.g. after we've read out the
* current hw state.
*/
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
{
struct intel_encoder *encoder;
struct intel_connector *connector;
 
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
connector->new_encoder =
to_intel_encoder(connector->base.encoder);
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
encoder->new_crtc =
to_intel_crtc(encoder->base.crtc);
}
}
 
 
 
 
 
 
static int intel_encoder_clones(struct drm_device *dev, int type_mask)
/**
* intel_modeset_commit_output_state
*
* This function copies the stage display pipe configuration to the real one.
*/
static void intel_modeset_commit_output_state(struct drm_device *dev)
{
struct intel_encoder *encoder;
int index_mask = 0;
int entry = 0;
struct intel_connector *connector;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
if (type_mask & encoder->clone_mask)
index_mask |= (1 << entry);
entry++;
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
connector->base.encoder = &connector->new_encoder->base;
}
 
return index_mask;
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
encoder->base.crtc = &encoder->new_crtc->base;
}
}
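/* Illustrative round-trip, added for clarity: the staged pointers act
 * as a shadow configuration. intel_modeset_update_staged_output_state()
 * copies the current state into connector->new_encoder and
 * encoder->new_crtc, the set_config code then mutates only those
 * staged pointers, and intel_modeset_commit_output_state() writes
 * them back to base.encoder / base.crtc once the affected pipes have
 * been shut down.
 */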
 
static bool has_edp_a(struct drm_device *dev)
static struct drm_display_mode *
intel_modeset_adjusted_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_device *dev = crtc->dev;
struct drm_display_mode *adjusted_mode;
struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
 
if (!IS_MOBILE(dev))
return false;
adjusted_mode = drm_mode_duplicate(dev, mode);
if (!adjusted_mode)
return ERR_PTR(-ENOMEM);
 
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
return false;
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
 
if (IS_GEN5(dev) &&
(I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
return false;
if (&encoder->new_crtc->base != crtc)
continue;
encoder_funcs = encoder->base.helper_private;
if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
adjusted_mode))) {
DRM_DEBUG_KMS("Encoder fixup failed\n");
goto fail;
}
}
 
return true;
if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
goto fail;
}
DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
static void intel_setup_outputs(struct drm_device *dev)
return adjusted_mode;
fail:
drm_mode_destroy(dev, adjusted_mode);
return ERR_PTR(-EINVAL);
}
 
/* Computes which crtcs are affected and sets the relevant bits in the mask. For
* simplicity we use the crtc's pipe number (because it's easier to obtain). */
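/* Worked example (illustrative): pipe A maps to bit 0, pipe B to
 * bit 1, and so on. If only the connector on pipe B gained a new
 * encoder, this yields prepare_pipes = modeset_pipes = 0x2 and
 * disable_pipes = 0x0; a crtc left with no staged encoder would
 * instead set its bit in disable_pipes and be masked back out of the
 * other two. */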
static void
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
unsigned *prepare_pipes, unsigned *disable_pipes)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
bool has_lvds = false;
struct intel_connector *connector;
struct drm_crtc *tmp_crtc;
 
ENTER();
*disable_pipes = *modeset_pipes = *prepare_pipes = 0;
 
if (IS_MOBILE(dev) && !IS_I830(dev))
has_lvds = intel_lvds_init(dev);
if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
/* disable the panel fitter on everything but LVDS */
I915_WRITE(PFIT_CONTROL, 0);
}
/* Check which crtcs have changed outputs connected to them; these need
* to be part of the prepare_pipes mask. We don't (yet) support global
* modeset across multiple crtcs, so modeset_pipes will only have one
* bit set at most. */
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->base.encoder == &connector->new_encoder->base)
continue;
 
if (HAS_PCH_SPLIT(dev)) {
dpd_is_edp = intel_dpd_is_edp(dev);
if (connector->base.encoder) {
tmp_crtc = connector->base.encoder->crtc;
 
if (has_edp_a(dev))
intel_dp_init(dev, DP_A);
*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
}
 
if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_D);
if (connector->new_encoder)
*prepare_pipes |=
1 << connector->new_encoder->new_crtc->pipe;
}
 
intel_crt_init(dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
if (encoder->base.crtc == &encoder->new_crtc->base)
continue;
 
if (HAS_PCH_SPLIT(dev)) {
int found;
if (encoder->base.crtc) {
tmp_crtc = encoder->base.crtc;
 
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, PCH_SDVOB);
if (!found)
intel_hdmi_init(dev, HDMIB);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_B);
*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
}
 
if (I915_READ(HDMIC) & PORT_DETECTED)
intel_hdmi_init(dev, HDMIC);
if (encoder->new_crtc)
*prepare_pipes |= 1 << encoder->new_crtc->pipe;
}
 
if (I915_READ(HDMID) & PORT_DETECTED)
intel_hdmi_init(dev, HDMID);
/* Check for any pipes that will be fully disabled ... */
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
bool used = false;
 
if (I915_READ(PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_C);
/* Don't try to disable disabled crtcs. */
if (!intel_crtc->base.enabled)
continue;
 
if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_D);
 
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
 
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, SDVOB);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, SDVOB);
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
if (encoder->new_crtc == intel_crtc)
used = true;
}
 
if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_B\n");
intel_dp_init(dev, DP_B);
if (!used)
*disable_pipes |= 1 << intel_crtc->pipe;
}
}
 
/* Before G4X SDVOC doesn't have its own detect register */
 
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOC\n");
found = intel_sdvo_init(dev, SDVOC);
}
/* set_mode is also used to update properties on live display pipes. */
intel_crtc = to_intel_crtc(crtc);
if (crtc->enabled)
*prepare_pipes |= 1 << intel_crtc->pipe;
 
if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
/* We only support modeset on one single crtc, hence we need to do that
* only for the passed-in crtc iff we change anything other than just
* disable crtcs.
*
* This is actually not true, to be fully compatible with the old crtc
* helper we automatically disable _any_ output (i.e. doesn't need to be
* connected to the crtc we're modesetting on) if it's disconnected.
* Which is a rather nutty api (since changing the output configuration
* without userspace's explicit request can lead to confusion), but
* alas. Hence we currently need to modeset on all pipes we prepare. */
if (*prepare_pipes)
*modeset_pipes = *prepare_pipes;
 
if (SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, SDVOC);
/* ... and mask these out. */
*modeset_pipes &= ~(*disable_pipes);
*prepare_pipes &= ~(*disable_pipes);
}
if (SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_C\n");
intel_dp_init(dev, DP_C);
}
}
 
if (SUPPORTS_INTEGRATED_DP(dev) &&
(I915_READ(DP_D) & DP_DETECTED)) {
DRM_DEBUG_KMS("probing DP_D\n");
intel_dp_init(dev, DP_D);
}
} else if (IS_GEN2(dev))
intel_dvo_init(dev);
static bool intel_crtc_in_use(struct drm_crtc *crtc)
{
struct drm_encoder *encoder;
struct drm_device *dev = crtc->dev;
 
// if (SUPPORTS_TV(dev))
// intel_tv_init(dev);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
if (encoder->crtc == crtc)
return true;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
intel_encoder_clones(dev, encoder->clone_mask);
return false;
}
 
/* disable all the possible outputs/crtcs before entering KMS mode */
// drm_helper_disable_unused_functions(dev);
static void
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
{
struct intel_encoder *intel_encoder;
struct intel_crtc *intel_crtc;
struct drm_connector *connector;
 
if (HAS_PCH_SPLIT(dev))
ironlake_init_pch_refclk(dev);
list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
base.head) {
if (!intel_encoder->base.crtc)
continue;
 
LEAVE();
intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
 
if (prepare_pipes & (1 << intel_crtc->pipe))
intel_encoder->connectors_active = false;
}
 
intel_modeset_commit_output_state(dev);
 
/* Update computed state. */
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
base.head) {
intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
}
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder || !connector->encoder->crtc)
continue;
 
intel_crtc = to_intel_crtc(connector->encoder->crtc);
 
if (prepare_pipes & (1 << intel_crtc->pipe)) {
struct drm_property *dpms_property =
dev->mode_config.dpms_property;
 
connector->dpms = DRM_MODE_DPMS_ON;
drm_connector_property_set_value(connector,
dpms_property,
DRM_MODE_DPMS_ON);
 
intel_encoder = to_intel_encoder(connector->encoder);
intel_encoder->connectors_active = true;
}
}
 
static const struct drm_framebuffer_funcs intel_fb_funcs = {
// .destroy = intel_user_framebuffer_destroy,
// .create_handle = intel_user_framebuffer_create_handle,
};
}
 
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
list_for_each_entry((intel_crtc), \
&(dev)->mode_config.crtc_list, \
base.head) \
if (mask & (1 << (intel_crtc)->pipe))
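/* Usage sketch (illustrative): the macro expands to a
 * list_for_each_entry guarded by the pipe-mask test, so
 *	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
 *		intel_crtc_disable(&intel_crtc->base);
 * visits only the crtcs whose pipe bit is set in disable_pipes.
 */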
 
void
intel_modeset_check_state(struct drm_device *dev)
{
int ret;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
 
if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
/* This also checks the encoder/connector hw state with the
* ->get_hw_state callbacks. */
intel_connector_check_state(connector);
 
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
 
switch (mode_cmd->pixel_format) {
case DRM_FORMAT_RGB332:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
/* RGB formats are common across chipsets */
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
break;
default:
DRM_ERROR("unsupported pixel format\n");
return -EINVAL;
WARN(&connector->new_encoder->base != connector->base.encoder,
"connector's staged encoder doesn't match current encoder\n");
}
 
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
return ret;
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
bool enabled = false;
bool active = false;
enum pipe pipe, tracked_pipe;
 
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
return 0;
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
encoder->base.base.id,
drm_get_encoder_name(&encoder->base));
 
WARN(&encoder->new_crtc->base != encoder->base.crtc,
"encoder's stage crtc doesn't match current crtc\n");
WARN(encoder->connectors_active && !encoder->base.crtc,
"encoder's active_connectors set, but no crtc\n");
 
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->base.encoder != &encoder->base)
continue;
enabled = true;
if (connector->base.dpms != DRM_MODE_DPMS_OFF)
active = true;
}
WARN(!!encoder->base.crtc != enabled,
"encoder's enabled state mismatch "
"(expected %i, found %i)\n",
!!encoder->base.crtc, enabled);
WARN(active && !encoder->base.crtc,
"active encoder with no crtc\n");
 
WARN(encoder->connectors_active != active,
"encoder's computed active state doesn't match tracked active state "
"(expected %i, found %i)\n", active, encoder->connectors_active);
 
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = NULL /*intel_user_framebuffer_create*/,
.output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
};
active = encoder->get_hw_state(encoder, &pipe);
WARN(active != encoder->connectors_active,
"encoder's hw state doesn't match sw tracking "
"(expected %i, found %i)\n",
encoder->connectors_active, active);
 
if (!encoder->base.crtc)
continue;
 
tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
WARN(active && pipe != tracked_pipe,
"active encoder's pipe doesn't match"
"(expected %i, found %i)\n",
tracked_pipe, pipe);
 
}
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
base.head) {
bool enabled = false;
bool active = false;
 
DRM_DEBUG_KMS("[CRTC:%d]\n",
crtc->base.base.id);
 
WARN(crtc->active && !crtc->base.enabled,
"active crtc, but not enabled in sw tracking\n");
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
if (encoder->base.crtc != &crtc->base)
continue;
enabled = true;
if (encoder->connectors_active)
active = true;
}
WARN(active != crtc->active,
"crtc's computed active state doesn't match tracked active state "
"(expected %i, found %i)\n", active, crtc->active);
WARN(enabled != crtc->base.enabled,
"crtc's computed enabled state doesn't match tracked enabled state "
"(expected %i, found %i)\n", enabled, crtc->base.enabled);
 
assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
}
}
 
bool ironlake_set_drps(struct drm_device *dev, u8 val)
bool intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_encoder *encoder;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
bool ret = true;
 
rgvswctl = I915_READ16(MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
return false; /* still busy with another command */
}
intel_modeset_affected_pipes(crtc, &modeset_pipes,
&prepare_pipes, &disable_pipes);
 
rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
I915_WRITE16(MEMSWCTL, rgvswctl);
POSTING_READ16(MEMSWCTL);
DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
modeset_pipes, prepare_pipes, disable_pipes);
 
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE16(MEMSWCTL, rgvswctl);
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
 
return true;
saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
 
/* Hack: Because we don't (yet) support global modeset on multiple
* crtcs, we don't keep track of the new mode for more than one crtc.
* Hence simply check whether any bit is set in modeset_pipes in all the
* pieces of code that are not yet converted to deal with multiple crtcs
* changing their mode at the same time. */
adjusted_mode = NULL;
if (modeset_pipes) {
adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
if (IS_ERR(adjusted_mode)) {
return false;
}
}
 
void ironlake_enable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
if (intel_crtc->base.enabled)
dev_priv->display.crtc_disable(&intel_crtc->base);
}
 
/* Enable temp reporting */
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
/* crtc->mode is already used by the ->mode_set callbacks, hence we need
* to set it here already despite that we pass it down the callchain.
*/
if (modeset_pipes)
crtc->mode = *mode;
 
/* 100ms RC evaluation intervals */
I915_WRITE(RCUPEI, 100000);
I915_WRITE(RCDNEI, 100000);
/* Only after disabling all output pipelines that will be changed can we
* update the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
 
/* Set max/min thresholds to 90ms and 80ms respectively */
I915_WRITE(RCBMAXAVG, 90000);
I915_WRITE(RCBMINAVG, 80000);
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
ret = !intel_crtc_mode_set(&intel_crtc->base,
mode, adjusted_mode,
x, y, fb);
if (!ret)
goto done;
 
I915_WRITE(MEMIHYST, 1);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 
/* Set up min, max, and cur for interrupt handling */
fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
if (encoder->crtc != &intel_crtc->base)
continue;
 
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
encoder->base.id, drm_get_encoder_name(encoder),
mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
}
}
 
dev_priv->fmax = fmax; /* IPS callback will increase this */
dev_priv->fstart = fstart;
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
dev_priv->display.crtc_enable(&intel_crtc->base);
 
dev_priv->max_delay = fstart;
dev_priv->min_delay = fmin;
dev_priv->cur_delay = fstart;
if (modeset_pipes) {
/* Store real post-adjustment hardware mode. */
crtc->hwmode = *adjusted_mode;
 
DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
fmax, fmin, fstart);
 
I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 
/*
* Interrupts will be enabled in ironlake_irq_postinstall
/* Calculate and store various constants which
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
*/
drm_calc_timestamping_constants(crtc);
}
 
I915_WRITE(VIDSTART, vstart);
POSTING_READ(VIDSTART);
/* FIXME: add subpixel order */
done:
drm_mode_destroy(dev, adjusted_mode);
if (!ret && crtc->enabled) {
crtc->hwmode = saved_hwmode;
crtc->mode = saved_mode;
} else {
intel_modeset_check_state(dev);
}
 
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
return ret;
}
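/* Sequence summary, added for clarity: intel_set_mode() computes the
 * affected pipe masks, disables every pipe it will touch, commits the
 * staged output state, programs the DPLLs and encoders through the
 * ->mode_set callbacks, re-enables the pipes, and on success runs
 * intel_modeset_check_state() to cross-check sw tracking against hw.
 */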
 
if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
DRM_ERROR("stuck trying to change perf mode\n");
msleep(1);
#undef for_each_intel_crtc_masked
 
ironlake_set_drps(dev, fstart);
static void intel_set_config_free(struct intel_set_config *config)
{
if (!config)
return;
 
dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
I915_READ(0x112e0);
// dev_priv->last_time1 = jiffies_to_msecs(jiffies);
dev_priv->last_count2 = I915_READ(0x112f4);
// getrawmonotonic(&dev_priv->last_time2);
kfree(config->save_connector_encoders);
kfree(config->save_encoder_crtcs);
kfree(config);
}
 
static int intel_set_config_save_state(struct drm_device *dev,
struct intel_set_config *config)
{
struct drm_encoder *encoder;
struct drm_connector *connector;
int count;
 
config->save_encoder_crtcs =
kcalloc(dev->mode_config.num_encoder,
sizeof(struct drm_crtc *), GFP_KERNEL);
if (!config->save_encoder_crtcs)
return -ENOMEM;
 
config->save_connector_encoders =
kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_encoder *), GFP_KERNEL);
if (!config->save_connector_encoders)
return -ENOMEM;
 
/* Copy data. Note that driver private data is not affected.
* Should anything bad happen only the expected state is
* restored, not the drivers personal bookkeeping.
*/
count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
config->save_encoder_crtcs[count++] = encoder->crtc;
}
 
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
config->save_connector_encoders[count++] = connector->encoder;
}
 
return 0;
}
 
static void intel_set_config_restore_state(struct drm_device *dev,
struct intel_set_config *config)
{
struct intel_encoder *encoder;
struct intel_connector *connector;
int count;
 
count = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->new_crtc =
to_intel_crtc(config->save_encoder_crtcs[count++]);
}
 
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
connector->new_encoder =
to_intel_encoder(config->save_connector_encoders[count++]);
}
}
 
 
 
static unsigned long intel_pxfreq(u32 vidfreq)
static void
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
struct intel_set_config *config)
{
unsigned long freq;
int div = (vidfreq & 0x3f0000) >> 16;
int post = (vidfreq & 0x3000) >> 12;
int pre = (vidfreq & 0x7);
 
if (!pre)
return 0;
/* We should be able to check here if the fb has the same properties
* and then just flip_or_move it */
if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
config->mode_changed = true;
} else if (set->fb == NULL) {
config->mode_changed = true;
} else if (set->fb->depth != set->crtc->fb->depth) {
config->mode_changed = true;
} else if (set->fb->bits_per_pixel !=
set->crtc->fb->bits_per_pixel) {
config->mode_changed = true;
} else
config->fb_changed = true;
}
 
freq = ((div * 133333) / ((1<<post) * pre));
if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
config->fb_changed = true;
 
return freq;
if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
DRM_DEBUG_KMS("modes are different, full mode set\n");
drm_mode_debug_printmodeline(&set->crtc->mode);
drm_mode_debug_printmodeline(set->mode);
config->mode_changed = true;
}
}
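/* Worked example (illustrative): moving only the scanout origin
 * (set->x/set->y differ, same mode, same fb depth and bpp) sets just
 * fb_changed, which later takes the fast intel_pipe_set_base() path;
 * an fb with a different depth or bits_per_pixel sets mode_changed
 * and forces the full intel_set_mode() path.
 */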
 
void intel_init_emon(struct drm_device *dev)
static int
intel_modeset_stage_output_state(struct drm_device *dev,
struct drm_mode_set *set,
struct intel_set_config *config)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lcfuse;
u8 pxw[16];
int i;
struct drm_crtc *new_crtc;
struct intel_connector *connector;
struct intel_encoder *encoder;
int count, ro;
 
/* Disable to program */
I915_WRITE(ECR, 0);
POSTING_READ(ECR);
/* The upper layers ensure that we either disable a crtc or have a list
* of connectors. For paranoia, double-check this. */
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
 
/* Program energy weights for various events */
I915_WRITE(SDEW, 0x15040d00);
I915_WRITE(CSIEW0, 0x007f0000);
I915_WRITE(CSIEW1, 0x1e220004);
I915_WRITE(CSIEW2, 0x04000004);
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
/* Otherwise traverse passed in connector list and get encoders
* for them. */
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == &connector->base) {
connector->new_encoder = connector->encoder;
break;
}
}
 
for (i = 0; i < 5; i++)
I915_WRITE(PEW + (i * 4), 0);
for (i = 0; i < 3; i++)
I915_WRITE(DEW + (i * 4), 0);
/* If we disable the crtc, disable all its connectors. Also, if
* the connector is on the changing crtc but not on the new
* connector list, disable it. */
if ((!set->fb || ro == set->num_connectors) &&
connector->base.encoder &&
connector->base.encoder->crtc == set->crtc) {
connector->new_encoder = NULL;
 
/* Program P-state weights to account for frequency power adjustment */
for (i = 0; i < 16; i++) {
u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
unsigned long freq = intel_pxfreq(pxvidfreq);
unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
unsigned long val;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
connector->base.base.id,
drm_get_connector_name(&connector->base));
}
 
val = vid * vid;
val *= (freq / 1000);
val *= 255;
val /= (127*127*900);
if (val > 0xff)
DRM_ERROR("bad pxval: %ld\n", val);
pxw[i] = val;
 
if (&connector->new_encoder->base != connector->base.encoder) {
DRM_DEBUG_KMS("encoder changed, full mode switch\n");
config->mode_changed = true;
}
/* Render standby states get 0 weight */
pxw[14] = 0;
pxw[15] = 0;
 
for (i = 0; i < 4; i++) {
u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
I915_WRITE(PXW + (i * 4), val);
/* Disable all disconnected encoders. */
if (connector->base.status == connector_status_disconnected)
connector->new_encoder = NULL;
}
/* connector->new_encoder is now updated for all connectors. */
 
/* Adjust magic regs to magic values (more experimental results) */
I915_WRITE(OGW0, 0);
I915_WRITE(OGW1, 0);
I915_WRITE(EG0, 0x00007f00);
I915_WRITE(EG1, 0x0000000e);
I915_WRITE(EG2, 0x000e0000);
I915_WRITE(EG3, 0x68000300);
I915_WRITE(EG4, 0x42000000);
I915_WRITE(EG5, 0x00140031);
I915_WRITE(EG6, 0);
I915_WRITE(EG7, 0);
/* Update crtc of enabled connectors. */
count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (!connector->new_encoder)
continue;
 
for (i = 0; i < 8; i++)
I915_WRITE(PXWL + (i * 4), 0);
new_crtc = connector->new_encoder->base.crtc;
 
/* Enable PMON + select events */
I915_WRITE(ECR, 0x80000019);
for (ro = 0; ro < set->num_connectors; ro++) {
if (set->connectors[ro] == &connector->base)
new_crtc = set->crtc;
}
 
lcfuse = I915_READ(LCFUSE02);
/* Make sure the new CRTC will work with the encoder */
if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
new_crtc)) {
return -EINVAL;
}
connector->encoder->new_crtc = to_intel_crtc(new_crtc);
 
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
drm_get_connector_name(&connector->base),
new_crtc->base.id);
}
 
static bool intel_enable_rc6(struct drm_device *dev)
{
/*
* Respect the kernel parameter if it is set
*/
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
/* Check for any encoders that needs to be disabled. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
list_for_each_entry(connector,
&dev->mode_config.connector_list,
base.head) {
if (connector->new_encoder == encoder) {
WARN_ON(!connector->new_encoder->new_crtc);
 
/*
* Disable RC6 on Ironlake
*/
if (INTEL_INFO(dev)->gen == 5)
return 0;
goto next_encoder;
}
}
encoder->new_crtc = NULL;
next_encoder:
/* Only now check for crtc changes so we don't miss encoders
* that will be disabled. */
if (&encoder->new_crtc->base != encoder->base.crtc) {
DRM_DEBUG_KMS("crtc changed, full mode switch\n");
config->mode_changed = true;
}
}
/* Now we've also updated encoder->new_crtc for all encoders. */
 
/*
* Disable rc6 on Sandybridge
*/
if (INTEL_INFO(dev)->gen == 6) {
DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
return 0;
}
DRM_DEBUG_DRIVER("RC6 enabled\n");
return 1;
}
 
void gen6_enable_rps(struct drm_i915_private *dev_priv)
static int intel_crtc_set_config(struct drm_mode_set *set)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 pcu_mbox, rc6_mask = 0;
int cur_freq, min_freq, max_freq;
int i;
struct drm_device *dev;
struct drm_mode_set save_set;
struct intel_set_config *config;
int ret;
 
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
*
* Perhaps there might be some value in exposing these to
* userspace...
*/
I915_WRITE(GEN6_RC_STATE, 0);
mutex_lock(&dev_priv->dev->struct_mutex);
gen6_gt_force_wake_get(dev_priv);
BUG_ON(!set);
BUG_ON(!set->crtc);
BUG_ON(!set->crtc->helper_private);
 
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
if (!set->mode)
set->fb = NULL;
 
I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
/* The fb helper likes to play gross jokes with ->mode_set_config.
* Unfortunately the crtc helper doesn't do much at all for this case,
* so we have to cope with this madness until the fb helper is fixed up. */
if (set->fb && set->num_connectors == 0)
return 0;
 
for (i = 0; i < I915_NUM_RINGS; i++)
I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
if (set->fb) {
DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
set->crtc->base.id, set->fb->base.id,
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
}
 
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
dev = set->crtc->dev;
 
if (intel_enable_rc6(dev_priv->dev))
rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
GEN6_RC_CTL_RC6_ENABLE;
ret = -ENOMEM;
config = kzalloc(sizeof(*config), GFP_KERNEL);
if (!config)
goto out_config;
 
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
GEN6_RC_CTL_EI_MODE(1) |
GEN6_RC_CTL_HW_ENABLE);
ret = intel_set_config_save_state(dev, config);
if (ret)
goto out_config;
 
I915_WRITE(GEN6_RPNSWREQ,
GEN6_FREQUENCY(10) |
GEN6_OFFSET(0) |
GEN6_AGGRESSIVE_TURBO);
I915_WRITE(GEN6_RC_VIDEO_FREQ,
GEN6_FREQUENCY(12));
save_set.crtc = set->crtc;
save_set.mode = &set->crtc->mode;
save_set.x = set->crtc->x;
save_set.y = set->crtc->y;
save_set.fb = set->crtc->fb;
 
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
18 << 24 |
6 << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
I915_WRITE(GEN6_RP_UP_EI, 100000);
I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_CONT);
/* Compute whether we need a full modeset, only an fb base update or no
* change at all. In the future we might also check whether only the
* mode changed, e.g. for LVDS where we only change the panel fitter in
* such cases. */
intel_set_config_compute_mode_changes(set, config);
 
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
ret = intel_modeset_stage_output_state(dev, set, config);
if (ret)
goto fail;
 
I915_WRITE(GEN6_PCODE_DATA, 0);
I915_WRITE(GEN6_PCODE_MAILBOX,
GEN6_PCODE_READY |
GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (config->mode_changed) {
if (set->mode) {
DRM_DEBUG_KMS("attempting to set mode from"
" userspace\n");
drm_mode_debug_printmodeline(set->mode);
}
 
min_freq = (rp_state_cap & 0xff0000) >> 16;
max_freq = rp_state_cap & 0xff;
cur_freq = (gt_perf_status & 0xff00) >> 8;
 
/* Check for overclock support */
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
pcu_mbox = I915_READ(GEN6_PCODE_DATA);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
max_freq = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
if (!intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
ret = -EINVAL;
goto fail;
}
} else if (config->fb_changed) {
ret = intel_pipe_set_base(set->crtc,
set->x, set->y, set->fb);
}
 
/* In units of 100MHz */
dev_priv->max_delay = max_freq;
dev_priv->min_delay = min_freq;
dev_priv->cur_delay = cur_freq;
intel_set_config_free(config);
 
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER,
GEN6_PM_MBOX_EVENT |
GEN6_PM_THERMAL_EVENT |
GEN6_PM_RP_DOWN_TIMEOUT |
GEN6_PM_RP_UP_THRESHOLD |
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_UP_EI_EXPIRED |
GEN6_PM_RP_DOWN_EI_EXPIRED);
// spin_lock_irq(&dev_priv->rps_lock);
// WARN_ON(dev_priv->pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
// spin_unlock_irq(&dev_priv->rps_lock);
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
return 0;
 
gen6_gt_force_wake_put(dev_priv);
mutex_unlock(&dev_priv->dev->struct_mutex);
fail:
intel_set_config_restore_state(dev, config);
 
/* Try to restore the config */
if (config->mode_changed &&
!intel_set_mode(save_set.crtc, save_set.mode,
save_set.x, save_set.y, save_set.fb))
DRM_ERROR("failed to restore config after modeset failure\n");
 
out_config:
intel_set_config_free(config);
return ret;
}
 
void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
static const struct drm_crtc_funcs intel_crtc_funcs = {
// .cursor_set = intel_crtc_cursor_set,
// .cursor_move = intel_crtc_cursor_move,
.gamma_set = intel_crtc_gamma_set,
.set_config = intel_crtc_set_config,
.destroy = intel_crtc_destroy,
// .page_flip = intel_crtc_page_flip,
};
 
static void intel_pch_pll_init(struct drm_device *dev)
{
int min_freq = 15;
int gpu_freq, ia_freq, max_ia_freq;
int scaling_factor = 180;
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
 
// max_ia_freq = cpufreq_quick_get_max(0);
/*
* Default to measured freq if none found, PCU will ensure we don't go
* over
*/
// if (!max_ia_freq)
max_ia_freq = 3000000; //tsc_khz;
if (dev_priv->num_pch_pll == 0) {
DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
return;
}
 
/* Convert from kHz to MHz */
max_ia_freq /= 1000;
for (i = 0; i < dev_priv->num_pch_pll; i++) {
dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
}
}
 
mutex_lock(&dev_priv->dev->struct_mutex);
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc;
int i;
 
/*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
gpu_freq--) {
int diff = dev_priv->max_delay - gpu_freq;
intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
if (intel_crtc == NULL)
return;
 
/*
* For GPU frequencies less than 750MHz, just use the lowest
* ring freq.
*/
if (gpu_freq < min_freq)
ia_freq = 800;
else
ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
 
I915_WRITE(GEN6_PCODE_DATA,
(ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
gpu_freq);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
GEN6_PCODE_READY) == 0, 10)) {
DRM_ERROR("pcode write of freq table timed out\n");
continue;
drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
for (i = 0; i < 256; i++) {
intel_crtc->lut_r[i] = i;
intel_crtc->lut_g[i] = i;
intel_crtc->lut_b[i] = i;
}
}
 
mutex_unlock(&dev_priv->dev->struct_mutex);
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
}
 
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
 
/* Required for FBC */
dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
DPFCRUNIT_CLOCK_GATE_DISABLE |
DPFDUNIT_CLOCK_GATE_DISABLE;
/* Required for CxSR */
dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
intel_crtc->bpp = 24; /* default for pre-Ironlake */
 
I915_WRITE(PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
SVSMUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
dbgprintf("CRTC %d mode %x FB %x enable %d\n",
intel_crtc->base.base.id, intel_crtc->base.mode,
intel_crtc->base.fb, intel_crtc->base.enabled);
 
/*
* According to the spec the following bits should be set in
* order to enable memory self-refresh
* The bit 22/21 of 0x42004
* The bit 5 of 0x42020
* The bit 15 of 0x45000
*/
I915_WRITE(ILK_DISPLAY_CHICKEN2,
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
I915_WRITE(ILK_DSPCLK_GATE,
(I915_READ(ILK_DSPCLK_GATE) |
ILK_DPARB_CLK_GATE));
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
}
 
/*
* Based on the document from hardware guys the following bits
* should be set unconditionally in order to enable FBC.
* The bit 22 of 0x42000
* The bit 22 of 0x42004
* The bit 7,8,9 of 0x42020.
*/
if (IS_IRONLAKE_M(dev)) {
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
I915_WRITE(ILK_DSPCLK_GATE,
I915_READ(ILK_DSPCLK_GATE) |
ILK_DPFC_DIS1 |
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
 
drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
DRM_MODE_OBJECT_CRTC);
 
if (!drmmode_obj) {
DRM_ERROR("no such CRTC id\n");
return -EINVAL;
}
 
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
I915_WRITE(_3D_CHICKEN2,
_3D_CHICKEN2_WM_READ_PIPELINED << 16 |
_3D_CHICKEN2_WM_READ_PIPELINED);
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
pipe_from_crtc_id->pipe = crtc->pipe;
 
return 0;
}
 
static void gen6_init_clock_gating(struct drm_device *dev)
static int intel_encoder_clones(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
struct drm_device *dev = encoder->base.dev;
struct intel_encoder *source_encoder;
int index_mask = 0;
int entry = 0;
 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
list_for_each_entry(source_encoder,
&dev->mode_config.encoder_list, base.head) {
 
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
if (encoder == source_encoder)
index_mask |= (1 << entry);
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
/* Intel hw has only one MUX where encoders could be cloned. */
if (encoder->cloneable && source_encoder->cloneable)
index_mask |= (1 << entry);
 
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
* some amount of runtime in the Mesa "fire" demo, and Unigine
* Sanctuary and Tropics, and apparently anything else with
* alpha test or pixel discard.
*
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
entry++;
}
 
/*
* According to the spec the following bits should be
* set in order to enable memory self-refresh and fbc:
* The bit21 and bit22 of 0x42000
* The bit21 and bit22 of 0x42004
* The bit5 and bit7 of 0x42020
* The bit14 of 0x70180
* The bit14 of 0x71180
*/
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
I915_WRITE(ILK_DSPCLK_GATE,
I915_READ(ILK_DSPCLK_GATE) |
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
 
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
return index_mask;
}
}
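/* Worked example (illustrative): every encoder is always its own
 * clone (the encoder == source_encoder bit), and two cloneable
 * encoders on the shared MUX set each other's bits. With encoders
 * [CRT, HDMI, LVDS] where only CRT and HDMI are cloneable, CRT's
 * index_mask is 0x3 and LVDS's is 0x4.
 */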
 
static void ivybridge_init_clock_gating(struct drm_device *dev)
static bool has_edp_a(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
if (!IS_MOBILE(dev))
return false;
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
return false;
 
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
if (IS_GEN5(dev) &&
(I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
return false;
 
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
return true;
}
}
 
static void g4x_init_clock_gating(struct drm_device *dev)
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dspclk_gate;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
bool has_lvds;
 
I915_WRITE(RENCLK_GATE_D1, 0);
I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
GS_UNIT_CLOCK_GATE_DISABLE |
CL_UNIT_CLOCK_GATE_DISABLE);
I915_WRITE(RAMCLK_GATE_D, 0);
dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
OVRUNIT_CLOCK_GATE_DISABLE |
OVCUNIT_CLOCK_GATE_DISABLE;
if (IS_GM45(dev))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
has_lvds = intel_lvds_init(dev);
if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
/* disable the panel fitter on everything but LVDS */
I915_WRITE(PFIT_CONTROL, 0);
}
 
static void crestline_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (HAS_PCH_SPLIT(dev)) {
dpd_is_edp = intel_dpd_is_edp(dev);
 
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(DSPCLK_GATE_D, 0);
I915_WRITE(RAMCLK_GATE_D, 0);
I915_WRITE16(DEUC, 0);
if (has_edp_a(dev))
intel_dp_init(dev, DP_A, PORT_A);
 
if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_D, PORT_D);
}
 
static void broadwater_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
intel_crt_init(dev);
 
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
I965_ISC_CLOCK_GATE_DISABLE |
I965_FBC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
}
if (IS_HASWELL(dev)) {
int found;
 
static void gen3_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dstate = I915_READ(D_STATE);
/* Haswell uses DDI functions to detect digital outputs */
found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
/* DDI A only supports eDP */
if (found)
intel_ddi_init(dev, PORT_A);
 
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
I915_WRITE(D_STATE, dstate);
}
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
* register */
found = I915_READ(SFUSE_STRAP);
 
static void i85x_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (found & SFUSE_STRAP_DDIB_DETECTED)
intel_ddi_init(dev, PORT_B);
if (found & SFUSE_STRAP_DDIC_DETECTED)
intel_ddi_init(dev, PORT_C);
if (found & SFUSE_STRAP_DDID_DETECTED)
intel_ddi_init(dev, PORT_D);
} else if (HAS_PCH_SPLIT(dev)) {
int found;
 
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, PCH_SDVOB, true);
if (!found)
intel_hdmi_init(dev, HDMIB, PORT_B);
if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_B, PORT_B);
}
 
static void i830_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (I915_READ(HDMIC) & PORT_DETECTED)
intel_hdmi_init(dev, HDMIC, PORT_C);
 
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
intel_hdmi_init(dev, HDMID, PORT_D);
 
static void ibx_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (I915_READ(PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_C, PORT_C);
 
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
int found;
 
if (I915_READ(SDVOB) & PORT_DETECTED) {
/* SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, SDVOB, true);
if (!found)
intel_hdmi_init(dev, SDVOB, PORT_B);
if (!found && (I915_READ(DP_B) & DP_DETECTED))
intel_dp_init(dev, DP_B, PORT_B);
}
 
static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
if (I915_READ(SDVOC) & PORT_DETECTED)
intel_hdmi_init(dev, SDVOC, PORT_C);
 
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
/* Without this, mode sets may fail silently on FDI */
for_each_pipe(pipe)
I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
/* Shares lanes with HDMI on SDVOC */
if (I915_READ(DP_C) & DP_DETECTED)
intel_dp_init(dev, DP_C, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
 
static void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->renderctx) {
// i915_gem_object_unpin(dev_priv->renderctx);
// drm_gem_object_unreference(&dev_priv->renderctx->base);
dev_priv->renderctx = NULL;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, SDVOB, true);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, SDVOB, PORT_B);
}
 
if (dev_priv->pwrctx) {
// i915_gem_object_unpin(dev_priv->pwrctx);
// drm_gem_object_unreference(&dev_priv->pwrctx->base);
dev_priv->pwrctx = NULL;
if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_B\n");
intel_dp_init(dev, DP_B, PORT_B);
}
}
 
static void ironlake_disable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Before G4X, SDVOC doesn't have its own detect register */
 
if (I915_READ(PWRCTXA)) {
/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
50);
if (I915_READ(SDVOB) & SDVO_DETECTED) {
DRM_DEBUG_KMS("probing SDVOC\n");
found = intel_sdvo_init(dev, SDVOC, false);
}
 
I915_WRITE(PWRCTXA, 0);
POSTING_READ(PWRCTXA);
if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
 
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
POSTING_READ(RSTDBYCTL);
if (SUPPORTS_INTEGRATED_HDMI(dev)) {
DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, SDVOC, PORT_C);
}
if (SUPPORTS_INTEGRATED_DP(dev)) {
DRM_DEBUG_KMS("probing DP_C\n");
intel_dp_init(dev, DP_C, PORT_C);
}
}
 
ironlake_teardown_rc6(dev);
if (SUPPORTS_INTEGRATED_DP(dev) &&
(I915_READ(DP_D) & DP_DETECTED)) {
DRM_DEBUG_KMS("probing DP_D\n");
intel_dp_init(dev, DP_D, PORT_D);
}
} else if (IS_GEN2(dev))
intel_dvo_init(dev);
 
static int ironlake_setup_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
// if (SUPPORTS_TV(dev))
// intel_tv_init(dev);
 
if (dev_priv->renderctx == NULL)
// dev_priv->renderctx = intel_alloc_context_page(dev);
if (!dev_priv->renderctx)
return -ENOMEM;
 
if (dev_priv->pwrctx == NULL)
// dev_priv->pwrctx = intel_alloc_context_page(dev);
if (!dev_priv->pwrctx) {
ironlake_teardown_rc6(dev);
return -ENOMEM;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
intel_encoder_clones(encoder);
}
 
return 0;
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ironlake_init_pch_refclk(dev);
}
 
void ironlake_enable_rc6(struct drm_device *dev)
 
 
static const struct drm_framebuffer_funcs intel_fb_funcs = {
// .destroy = intel_user_framebuffer_destroy,
// .create_handle = intel_user_framebuffer_create_handle,
};
 
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
if (!intel_enable_rc6(dev))
return;
if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;
 
mutex_lock(&dev->struct_mutex);
ret = ironlake_setup_rc6(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return;
}
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
 
/*
* GPU can automatically power down the render unit if given a page
* to save state.
*/
#if 0
ret = BEGIN_LP_RING(6);
if (ret) {
ironlake_teardown_rc6(dev);
mutex_unlock(&dev->struct_mutex);
return;
switch (mode_cmd->pixel_format) {
case DRM_FORMAT_RGB332:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
/* RGB formats are common across chipsets */
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
break;
default:
DRM_DEBUG_KMS("unsupported pixel format %u\n",
mode_cmd->pixel_format);
return -EINVAL;
}
 
OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
OUT_RING(MI_SET_CONTEXT);
OUT_RING(dev_priv->renderctx->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
MI_RESTORE_INHIBIT);
OUT_RING(MI_SUSPEND_FLUSH);
OUT_RING(MI_NOOP);
OUT_RING(MI_FLUSH);
ADVANCE_LP_RING();
 
/*
* Wait for the command parser to advance past MI_SET_CONTEXT. The HW
* does an implicit flush; combined with the MI_FLUSH above, it should
* be safe to assume that renderctx is valid.
*/
ret = intel_wait_ring_idle(LP_RING(dev_priv));
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n");
ironlake_teardown_rc6(dev);
mutex_unlock(&dev->struct_mutex);
return;
DRM_ERROR("framebuffer init failed %d\n", ret);
return ret;
}
#endif
 
I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
mutex_unlock(&dev->struct_mutex);
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
return 0;
}
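/*
 * Hedged sketch: the (mode_cmd->pitches[0] & 63) test above rejects
 * framebuffers whose stride is not a multiple of 64 bytes. A caller would
 * typically round the stride up before creating the buffer object; ALIGN64
 * below is a hypothetical helper, not a driver macro. Standalone user-space
 * illustration.
 */
#include <assert.h>
#include <stdio.h>

#define ALIGN64(x) (((x) + 63u) & ~63u)

int main(void)
{
	unsigned int width = 1366, bytes_per_pixel = 4;
	unsigned int stride = ALIGN64(width * bytes_per_pixel); /* 5464 -> 5504 */

	assert((stride & 63) == 0);	/* would pass the driver's check */
	printf("stride = %u bytes\n", stride);
	return 0;
}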
 
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->display.init_clock_gating(dev);
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = NULL /*intel_user_framebuffer_create*/,
.output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
};
 
if (dev_priv->display.init_pch_clock_gating)
dev_priv->display.init_pch_clock_gating(dev);
}
 
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
7847,35 → 7760,25
 
/* We always want a DPMS function */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.dpms = ironlake_crtc_dpms;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
dev_priv->display.off = ironlake_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else {
dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
dev_priv->display.off = i9xx_crtc_off;
dev_priv->display.update_plane = i9xx_update_plane;
}
 
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
} else if (IS_CRESTLINE(dev)) {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_enable_fbc;
dev_priv->display.disable_fbc = i8xx_disable_fbc;
}
/* 855GM needs testing */
}
 
/* Returns the core display clock speed */
if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
if (IS_VALLEYVIEW(dev))
dev_priv->display.get_display_clock_speed =
valleyview_get_display_clock_speed;
else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
dev_priv->display.get_display_clock_speed =
i945_get_display_clock_speed;
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
7896,153 → 7799,33
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
 
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
 
/* IVB configs may use multi-threaded forcewake */
if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
 
/* A small trick here - if the bios hasn't configured MT forcewake,
* and if the device is in RC6, then force_wake_mt_get will not wake
* the device and the ECOBUS read will return zero, which will be
* (correctly) interpreted by the test below as MT forcewake being
* disabled.
*/
mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv);
ecobus = I915_READ_NOTRACE(ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
 
if (ecobus & FORCEWAKE_MT_ENABLE) {
DRM_DEBUG_KMS("Using MT version of forcewake\n");
dev_priv->display.force_wake_get =
__gen6_gt_force_wake_mt_get;
dev_priv->display.force_wake_put =
__gen6_gt_force_wake_mt_put;
}
}
 
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
 
if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
dev_priv->display.update_wm = ironlake_update_wm;
else {
DRM_DEBUG_KMS("Failed to get proper latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_GEN6(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
dev_priv->display.write_eld = ironlake_write_eld;
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
} else
dev_priv->display.update_wm = NULL;
} else if (IS_PINEVIEW(dev)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
DRM_INFO("failed to find known CxSR latency "
"(found ddr%s fsb freq %d, mem freq %d), "
"disabling CxSR\n",
(dev_priv->is_ddr3 == 1) ? "3" : "2",
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
pineview_disable_cxsr(dev);
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
dev_priv->display.update_wm = g4x_update_wm;
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
} else if (IS_GEN4(dev)) {
dev_priv->display.update_wm = i965_update_wm;
if (IS_CRESTLINE(dev))
dev_priv->display.init_clock_gating = crestline_init_clock_gating;
else if (IS_BROADWATER(dev))
dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
} else if (IS_GEN3(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_I865G(dev)) {
dev_priv->display.update_wm = i830_update_wm;
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
dev_priv->display.get_fifo_size = i830_get_fifo_size;
} else if (IS_I85X(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i85x_get_fifo_size;
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
} else {
dev_priv->display.update_wm = i830_update_wm;
dev_priv->display.init_clock_gating = i830_init_clock_gating;
if (IS_845G(dev))
dev_priv->display.get_fifo_size = i845_get_fifo_size;
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
 
/* Default just returns -ENODEV to indicate unsupported */
// dev_priv->display.queue_flip = intel_default_queue_flip;
 
#if 0
switch (INTEL_INFO(dev)->gen) {
case 2:
dev_priv->display.queue_flip = intel_gen2_queue_flip;
break;
 
case 3:
dev_priv->display.queue_flip = intel_gen3_queue_flip;
break;
 
case 4:
case 5:
dev_priv->display.queue_flip = intel_gen4_queue_flip;
break;
 
case 6:
dev_priv->display.queue_flip = intel_gen6_queue_flip;
break;
case 7:
dev_priv->display.queue_flip = intel_gen7_queue_flip;
break;
}
#endif
}
 
/*
* Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
8054,7 → 7837,7
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->quirks |= QUIRK_PIPEA_FORCE;
DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
DRM_INFO("applying pipe a force quirk\n");
}
 
/*
8064,8 → 7847,20
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
DRM_INFO("applying lvds SSC disable quirk\n");
}
 
/*
* A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
* brightness value
*/
static void quirk_invert_brightness(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
DRM_INFO("applying inverted panel brightness quirk\n");
}
 
struct intel_quirk {
int device;
int subsystem_vendor;
8073,27 → 7868,47
void (*hook)(struct drm_device *dev);
};
 
struct intel_quirk intel_quirks[] = {
/* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */
{ 0x2a42, 0x103c, 0x30eb, quirk_pipea_force },
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
void (*hook)(struct drm_device *dev);
const struct dmi_system_id (*dmi_id_list)[];
};
 
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
return 1;
}
 
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
{
.dmi_id_list = &(const struct dmi_system_id[]) {
{
.callback = intel_dmi_reverse_brightness,
.ident = "NCR Corporation",
.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, ""),
},
},
{ } /* terminating entry */
},
.hook = quirk_invert_brightness,
},
};
 
static struct intel_quirk intel_quirks[] = {
/* HP Mini needs pipe A force quirk (LP: #322104) */
{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
 
/* Thinkpad R31 needs pipe A force quirk */
{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
/* Toshiba Protege R-205, S-209 need pipe A force quirk */
{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
 
/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
{ 0x3577, 0x1014, 0x0513, quirk_pipea_force },
/* ThinkPad X40 needs pipe A force quirk */
 
/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
 
/* 855 & before need to leave pipe A & dpll A up */
{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
/* 830/845 need to leave pipe A & dpll A up */
{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
 
/* Lenovo U160 cannot use SSC on LVDS */
{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
8100,6 → 7915,9
 
/* Sony Vaio Y cannot use SSC on LVDS */
{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
 
/* Acer Aspire 5734Z must invert backlight brightness */
{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
};
 
static void intel_init_quirks(struct drm_device *dev)
8117,6 → 7935,10
q->subsystem_device == PCI_ANY_ID))
q->hook(dev);
}
// for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
// if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
// intel_dmi_quirks[i].hook(dev);
// }
}
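/*
 * Hedged sketch of the quirk-table matching above: a quirk hook fires when
 * the PCI device id matches exactly and each subsystem id either matches or
 * is wildcarded with PCI_ANY_ID. Standalone user-space illustration; the
 * ids below are taken from the table, the rest is hypothetical scaffolding.
 */
#include <stdio.h>

#define ANY_ID (~0)	/* stand-in for PCI_ANY_ID */

struct quirk { int device, sub_vendor, sub_device; const char *name; };

static const struct quirk quirks[] = {
	{ 0x3582, ANY_ID, ANY_ID, "pipe A force (855)" },
	{ 0x0046, 0x17aa, 0x3920, "LVDS SSC disable (Lenovo U160)" },
};

int main(void)
{
	int device = 0x0046, sub_vendor = 0x17aa, sub_device = 0x3920;

	for (unsigned int i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		if (q->device == device &&
		    (q->sub_vendor == ANY_ID || q->sub_vendor == sub_vendor) &&
		    (q->sub_device == ANY_ID || q->sub_device == sub_device))
			printf("applying quirk: %s\n", q->name);
	}
	return 0;	/* matches only the Lenovo U160 entry */
}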
 
/* Disable the VGA plane that we never use */
8132,9 → 7954,9
vga_reg = VGACNTRL;
 
// vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
out8(VGA_SR_INDEX, 1);
out8(SR01, VGA_SR_INDEX);
sr1 = in8(VGA_SR_DATA);
out8(VGA_SR_DATA,sr1 | 1<<5);
out8(sr1 | 1<<5, VGA_SR_DATA);
// vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
 
8142,6 → 7964,22
POSTING_READ(vga_reg);
}
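/*
 * Hedged sketch of the VGA sequencer access pattern used above: SR01 is
 * reached by writing its index to the index port (0x3C4) and then
 * read-modify-writing the data port (0x3C5); setting bit 5 of SR01 blanks
 * the screen. Real port I/O is replaced by an array here so the sketch runs
 * in user space; it only illustrates the index/data protocol.
 */
#include <stdio.h>

static unsigned char sr[8];		/* fake sequencer register file */
static unsigned char sr_index;

static void out8(unsigned char val, unsigned short port)
{
	if (port == 0x3C4)		/* VGA_SR_INDEX */
		sr_index = val & 7;
	else if (port == 0x3C5)		/* VGA_SR_DATA */
		sr[sr_index] = val;
}

static unsigned char in8(unsigned short port)
{
	return (port == 0x3C5) ? sr[sr_index] : 0;
}

int main(void)
{
	out8(0x01, 0x3C4);		/* select SR01 */
	unsigned char sr1 = in8(0x3C5);	/* read current value */
	out8(sr1 | 1 << 5, 0x3C5);	/* set the screen-off bit */
	printf("SR01 = 0x%02x\n", sr[1]); /* prints SR01 = 0x20 */
	return 0;
}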
 
void intel_modeset_init_hw(struct drm_device *dev)
{
/* We attempt to init the necessary power wells early during
* initialization, so the subsystems that expect power to be enabled can work.
*/
intel_init_power_wells(dev);
 
intel_prepare_ddi(dev);
 
intel_init_clock_gating(dev);
 
// mutex_lock(&dev->struct_mutex);
// intel_enable_gt_powersave(dev);
// mutex_unlock(&dev->struct_mutex);
}
 
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
8152,10 → 7990,15
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
 
dev->mode_config.funcs = (void *)&intel_mode_funcs;
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
 
dev->mode_config.funcs = &intel_mode_funcs;
 
intel_init_quirks(dev);
 
intel_init_pm(dev);
 
intel_init_display(dev);
 
if (IS_GEN2(dev)) {
8168,7 → 8011,7
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
}
dev->mode_config.fb_base = get_bus_addr();
dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
 
DRM_DEBUG_KMS("%d display pipe%s available.\n",
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
8180,36 → 8023,325
DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
 
intel_pch_pll_init(dev);
 
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
}
 
intel_init_clock_gating(dev);
static void
intel_connector_break_all_links(struct intel_connector *connector)
{
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
connector->encoder->connectors_active = false;
connector->encoder->base.crtc = NULL;
}
 
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
static void intel_enable_pipe_a(struct drm_device *dev)
{
struct intel_connector *connector;
struct drm_connector *crt = NULL;
struct intel_load_detect_pipe load_detect_temp;
 
/* We can't just switch on pipe A; we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
* by enabling the load detect pipe once. */
list_for_each_entry(connector,
&dev->mode_config.connector_list,
base.head) {
if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
crt = &connector->base;
break;
}
}
 
if (IS_GEN6(dev) || IS_GEN7(dev)) {
gen6_enable_rps(dev_priv);
gen6_update_ring_freq(dev_priv);
if (!crt)
return;
 
if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
intel_release_load_detect_pipe(crt, &load_detect_temp);
 
 
}
 
// INIT_WORK(&dev_priv->idle_work, intel_idle_update);
// setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
// (unsigned long)dev);
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
u32 reg, val;
 
if (dev_priv->num_pipe == 1)
return true;
 
reg = DSPCNTR(!crtc->plane);
val = I915_READ(reg);
 
if ((val & DISPLAY_PLANE_ENABLE) &&
(!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
return false;
 
return true;
}
 
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
 
/* Clear any frame start delays used for debugging left by the BIOS */
reg = PIPECONF(crtc->pipe);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 
/* We need to sanitize the plane -> pipe mapping first because this will
* disable the crtc (and hence change the state) if it is wrong. Note
* that gen4+ has a fixed plane -> pipe mapping. */
if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
struct intel_connector *connector;
bool plane;
 
DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
crtc->base.base.id);
 
/* Pipe has the wrong plane attached and the plane is active.
* Temporarily change the plane mapping and disable everything
* ... */
plane = crtc->plane;
crtc->plane = !plane;
dev_priv->display.crtc_disable(&crtc->base);
crtc->plane = plane;
 
/* ... and break all links. */
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->encoder->base.crtc != &crtc->base)
continue;
 
intel_connector_break_all_links(connector);
}
 
WARN_ON(crtc->active);
crtc->base.enabled = false;
}
 
if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
crtc->pipe == PIPE_A && !crtc->active) {
/* BIOS forgot to enable pipe A; this mostly happens after
* resume. Force-enable the pipe to fix this; in the update_dpms
* call below we restore the pipe to the right state, but leave
* the required bits on. */
intel_enable_pipe_a(dev);
}
 
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
intel_crtc_update_dpms(&crtc->base);
 
if (crtc->active != crtc->base.enabled) {
struct intel_encoder *encoder;
 
/* This can happen either due to bugs in the get_hw_state
* functions or because the pipe is force-enabled due to the
* pipe A quirk. */
DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
crtc->base.base.id,
crtc->base.enabled ? "enabled" : "disabled",
crtc->active ? "enabled" : "disabled");
 
crtc->base.enabled = crtc->active;
 
/* Because we only establish the connector -> encoder ->
* crtc links if something is active, this means the
* crtc is now deactivated. Break the links. Connector
* -> encoder links are only established when things are
* actually up, hence no need to break them. */
WARN_ON(crtc->active);
 
for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
WARN_ON(encoder->connectors_active);
encoder->base.crtc = NULL;
}
}
}
 
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
struct intel_connector *connector;
struct drm_device *dev = encoder->base.dev;
 
/* We need to check both for a crtc link (meaning that the
* encoder is active and trying to read from a pipe) and the
* pipe itself being active. */
bool has_active_crtc = encoder->base.crtc &&
to_intel_crtc(encoder->base.crtc)->active;
 
if (encoder->connectors_active && !has_active_crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
encoder->base.base.id,
drm_get_encoder_name(&encoder->base));
 
/* Connector is active, but has no active pipe. This is
* fallout from our resume register restoring. Disable
* the encoder manually again. */
if (encoder->base.crtc) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
drm_get_encoder_name(&encoder->base));
encoder->disable(encoder);
}
 
/* Inconsistent output/port/pipe state happens presumably due to
* a bug in one of the get_hw_state functions. Or someplace else
* in our code, like the register restore mess on resume. Clamp
* things to off as a safer default. */
list_for_each_entry(connector,
&dev->mode_config.connector_list,
base.head) {
if (connector->encoder != encoder)
continue;
 
intel_connector_break_all_links(connector);
}
}
/* Enabled encoders without active connectors will be fixed in
* the crtc fixup. */
}
 
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
* and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
u32 tmp;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
 
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
tmp = I915_READ(PIPECONF(pipe));
if (tmp & PIPECONF_ENABLE)
crtc->active = true;
else
crtc->active = false;
 
crtc->base.enabled = crtc->active;
 
DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
crtc->base.base.id,
crtc->active ? "enabled" : "disabled");
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
pipe = 0;
 
if (encoder->get_hw_state(encoder, &pipe)) {
encoder->base.crtc =
dev_priv->pipe_to_crtc_mapping[pipe];
} else {
encoder->base.crtc = NULL;
}
 
encoder->connectors_active = false;
DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
encoder->base.crtc ? "enabled" : "disabled",
pipe);
}
 
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->get_hw_state(connector)) {
connector->base.dpms = DRM_MODE_DPMS_ON;
connector->encoder->connectors_active = true;
connector->base.encoder = &connector->encoder->base;
} else {
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id,
drm_get_connector_name(&connector->base),
connector->base.encoder ? "enabled" : "disabled");
}
 
/* HW state is read out, now we need to sanitize this mess. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
intel_sanitize_encoder(encoder);
}
 
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
intel_sanitize_crtc(crtc);
}
 
intel_modeset_update_staged_output_state(dev);
 
intel_modeset_check_state(dev);
}
 
void intel_modeset_gem_init(struct drm_device *dev)
{
if (IS_IRONLAKE_M(dev))
ironlake_enable_rc6(dev);
intel_modeset_init_hw(dev);
 
// intel_setup_overlay(dev);
 
intel_modeset_setup_hw_state(dev);
}
 
void intel_modeset_cleanup(struct drm_device *dev)
{
#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
 
// drm_kms_helper_poll_fini(dev);
mutex_lock(&dev->struct_mutex);
 
// intel_unregister_dsm_handler();
 
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
continue;
 
intel_crtc = to_intel_crtc(crtc);
intel_increase_pllclock(crtc);
}
 
intel_disable_fbc(dev);
 
intel_disable_gt_powersave(dev);
 
ironlake_teardown_rc6(dev);
 
if (IS_VALLEYVIEW(dev))
vlv_init_dpio(dev);
 
mutex_unlock(&dev->struct_mutex);
 
/* Disable the irq before mode object teardown, for the irq might
* enqueue unpin/hotplug work. */
// drm_irq_uninstall(dev);
// cancel_work_sync(&dev_priv->hotplug_work);
// cancel_work_sync(&dev_priv->rps.work);
 
/* flush any delayed tasks or pending work */
// flush_scheduled_work();
 
drm_mode_config_cleanup(dev);
#endif
}
 
/*
* Return which encoder is currently attached for connector.
*/
8226,4 → 8358,131
&encoder->base);
}
 
/*
* set vga decode state - true == enable VGA decode
*/
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 gmch_ctrl;
 
pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
if (state)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
return 0;
}
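/*
 * Hedged sketch of the read-modify-write above: VGA decode is governed by a
 * single bit in the 16-bit GMCH control register, cleared to enable decode
 * and set to disable it. Config-space access is stubbed with a variable and
 * VGA_DISABLE_BIT is a hypothetical stand-in for INTEL_GMCH_VGA_DISABLE;
 * only the bit manipulation is illustrated.
 */
#include <stdio.h>

#define VGA_DISABLE_BIT (1 << 1)	/* hypothetical bit position */

static unsigned short fake_gmch_ctrl = VGA_DISABLE_BIT;

static void vga_set_state(int enable)
{
	unsigned short v = fake_gmch_ctrl;	/* pci_read_config_word() */

	if (enable)
		v &= ~VGA_DISABLE_BIT;
	else
		v |= VGA_DISABLE_BIT;
	fake_gmch_ctrl = v;			/* pci_write_config_word() */
}

int main(void)
{
	vga_set_state(1);
	printf("gmch_ctrl = 0x%04x\n", fake_gmch_ctrl);	/* bit cleared */
	return 0;
}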
 
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
 
struct intel_display_error_state {
struct intel_cursor_error_state {
u32 control;
u32 position;
u32 base;
u32 size;
} cursor[I915_MAX_PIPES];
 
struct intel_pipe_error_state {
u32 conf;
u32 source;
 
u32 htotal;
u32 hblank;
u32 hsync;
u32 vtotal;
u32 vblank;
u32 vsync;
} pipe[I915_MAX_PIPES];
 
struct intel_plane_error_state {
u32 control;
u32 stride;
u32 size;
u32 pos;
u32 addr;
u32 surface;
u32 tile_offset;
} plane[I915_MAX_PIPES];
};
 
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
int i;
 
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
 
for_each_pipe(i) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->cursor[i].base = I915_READ(CURBASE(i));
 
error->plane[i].control = I915_READ(DSPCNTR(i));
error->plane[i].stride = I915_READ(DSPSTRIDE(i));
error->plane[i].size = I915_READ(DSPSIZE(i));
error->plane[i].pos = I915_READ(DSPPOS(i));
error->plane[i].addr = I915_READ(DSPADDR(i));
if (INTEL_INFO(dev)->gen >= 4) {
error->plane[i].surface = I915_READ(DSPSURF(i));
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
 
error->pipe[i].conf = I915_READ(PIPECONF(i));
error->pipe[i].source = I915_READ(PIPESRC(i));
error->pipe[i].htotal = I915_READ(HTOTAL(i));
error->pipe[i].hblank = I915_READ(HBLANK(i));
error->pipe[i].hsync = I915_READ(HSYNC(i));
error->pipe[i].vtotal = I915_READ(VTOTAL(i));
error->pipe[i].vblank = I915_READ(VBLANK(i));
error->pipe[i].vsync = I915_READ(VSYNC(i));
}
 
return error;
}
 
void
intel_display_print_error_state(struct seq_file *m,
struct drm_device *dev,
struct intel_display_error_state *error)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
 
seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
for_each_pipe(i) {
seq_printf(m, "Pipe [%d]:\n", i);
seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
 
seq_printf(m, "Plane [%d]:\n", i);
seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
seq_printf(m, " POS: %08x\n", error->plane[i].pos);
seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
if (INTEL_INFO(dev)->gen >= 4) {
seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
}
 
seq_printf(m, "Cursor [%d]:\n", i);
seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
seq_printf(m, " POS: %08x\n", error->cursor[i].position);
seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
}
#endif
/drivers/video/drm/i915/intel_dp.c
27,46 → 27,19
 
#include <linux/i2c.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "drm_dp_helper.h"
 
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
 
#define DP_LINK_CONFIGURATION_SIZE 9
 
struct intel_dp {
struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
int force_audio;
uint32_t color_range;
int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
struct drm_display_mode *panel_fixed_mode; /* for eDP */
bool want_panel_vdd;
};
 
/**
* is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
150,6 → 123,18
*link_bw = 270000;
}
 
int
intel_edp_target_clock(struct intel_encoder *intel_encoder,
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
 
if (intel_dp->panel_fixed_mode)
return intel_dp->panel_fixed_mode->clock;
else
return mode->clock;
}
 
static int
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
217,14 → 202,38
return (max_link_clock * max_lanes * 8) / 10;
}
 
static bool
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
struct drm_display_mode *mode,
bool adjust_mode)
{
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
int max_rate, mode_rate;
 
mode_rate = intel_dp_link_required(mode->clock, 24);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
 
if (mode_rate > max_rate) {
mode_rate = intel_dp_link_required(mode->clock, 18);
if (mode_rate > max_rate)
return false;
 
if (adjust_mode)
mode->private_flags
|= INTEL_MODE_DP_FORCE_6BPC;
 
return true;
}
 
return true;
}
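/*
 * Hedged worked example of the bandwidth check above. Judging by the
 * formula in intel_dp_max_data_rate(), link capacity is kept in units of
 * 10 kbit/s: each lane carries link_clock * 8/10 data bits per second after
 * 8b/10b coding. A mode then needs roughly pixel_clock * bpp / 10 of the
 * same units (assumed here to mirror intel_dp_link_required(), whose body
 * is not shown in this hunk). Standalone illustration with a hypothetical
 * 200 MHz mode on a 2-lane 2.7 GHz link.
 */
#include <stdio.h>

static int max_data_rate(int link_clock_khz, int lanes)
{
	return (link_clock_khz * lanes * 8) / 10;	/* as in the driver */
}

static int mode_rate(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 9) / 10;	/* assumed formula */
}

int main(void)
{
	int max = max_data_rate(270000, 2);	/* HBR x 2 lanes = 432000 */

	if (mode_rate(200000, 24) > max &&	/* 480000: too much at 24bpp */
	    mode_rate(200000, 18) <= max)	/* 360000: fits after dithering */
		printf("mode needs INTEL_MODE_DP_FORCE_6BPC\n");
	return 0;
}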
 
static int
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
int max_rate, mode_rate;
 
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
234,20 → 243,15
return MODE_PANEL;
}
 
mode_rate = intel_dp_link_required(mode->clock, 24);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
 
if (mode_rate > max_rate) {
mode_rate = intel_dp_link_required(mode->clock, 18);
if (mode_rate > max_rate)
if (!intel_dp_adjust_dithering(intel_dp, mode, false))
return MODE_CLOCK_HIGH;
else
mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
}
 
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
 
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
 
return MODE_OK;
}
 
366,7 → 370,7
else
aux_clock_divider = 225; /* eDP input clock at 450MHz */
} else if (HAS_PCH_SPLIT(dev))
aux_clock_divider = 62; /* IRL input clock fixed at 125MHz */
aux_clock_divider = 63; /* IRL input clock fixed at 125MHz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
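/* Hedged note: all three dividers appear to target an AUX bit clock of
 * roughly 2 MHz from whatever reference feeds the channel, e.g.
 * 450 MHz / 225 = 2.0 MHz and 125 MHz / 63 ~= 1.98 MHz. */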
 
419,6 → 423,10
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
 
if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR))
continue;
if (status & DP_AUX_CH_CTL_DONE)
break;
}
649,7 → 657,7
intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
 
memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
// intel_dp->adapter.owner = THIS_MODULE;
intel_dp->adapter.owner = THIS_MODULE;
intel_dp->adapter.class = I2C_CLASS_DDC;
strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
663,7 → 671,8
}
 
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
intel_dp_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
671,7 → 680,7
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
678,26 → 687,35
intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
mode, adjusted_mode);
/*
* mode->clock is used to calculate the Data & Link M/N
* of the pipe. For eDP the fixed panel clock should be used.
*/
mode->clock = intel_dp->panel_fixed_mode->clock;
}
 
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
 
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
max_lane_count, bws[max_clock], adjusted_mode->clock);
 
if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
return false;
 
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
 
for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
if (intel_dp_link_required(mode->clock, bpp)
<= link_avail) {
if (mode_rate <= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
DRM_DEBUG_KMS("Display port link bw %02x lane "
"count %d clock %d\n",
DRM_DEBUG_KMS("DP link bw %02x lane "
"count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
adjusted_mode->clock);
adjusted_mode->clock, bpp);
DRM_DEBUG_KMS("DP link bw required %i available %i\n",
mode_rate, link_avail);
return true;
}
}
744,8 → 762,7
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
struct intel_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
755,13 → 772,9
/*
* Find the lane count in the intel_encoder private
*/
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
struct intel_dp *intel_dp;
for_each_encoder_on_crtc(dev, crtc, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
if (encoder->crtc != crtc)
continue;
 
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
intel_dp->base.type == INTEL_OUTPUT_EDP)
{
795,9 → 808,6
}
}
 
static void ironlake_edp_pll_on(struct drm_encoder *encoder);
static void ironlake_edp_pll_off(struct drm_encoder *encoder);
 
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
808,14 → 818,6
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
/* Turn on the eDP PLL if needed */
if (is_edp(intel_dp)) {
if (!is_pch_edp(intel_dp))
ironlake_edp_pll_on(encoder);
else
ironlake_edp_pll_off(encoder);
}
 
/*
* There are four kinds of DP registers:
*
837,10 → 839,8
* supposed to be read-only.
*/
intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
/* Handle DP bits in common between all three register formats */
 
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
switch (intel_dp->lane_count) {
887,7 → 887,6
intel_dp->DP |= intel_crtc->pipe << 29;
 
/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
909,7 → 908,6
 
if (is_cpu_edp(intel_dp)) {
/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
1116,13 → 1114,17
 
DRM_DEBUG_KMS("Turn eDP power off\n");
 
WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
 
pp = ironlake_get_pp_control(dev_priv);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
 
intel_dp->want_panel_vdd = false;
 
ironlake_wait_panel_off(intel_dp);
}
 
1166,27 → 1168,49
msleep(intel_dp->backlight_off_delay);
}
 
static void ironlake_edp_pll_on(struct drm_encoder *encoder)
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = encoder->dev;
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
 
assert_pipe_disabled(dev_priv,
to_intel_crtc(crtc)->pipe);
 
DRM_DEBUG_KMS("\n");
dpa_ctl = I915_READ(DP_A);
dpa_ctl |= DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
 
/* We don't adjust intel_dp->DP while tearing down the link, to
* facilitate link retraining (e.g. after hotplug). Hence clear all
* enable bits here to ensure that we don't enable too much. */
intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
intel_dp->DP |= DP_PLL_ENABLE;
I915_WRITE(DP_A, intel_dp->DP);
POSTING_READ(DP_A);
udelay(200);
}
 
static void ironlake_edp_pll_off(struct drm_encoder *encoder)
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = encoder->dev;
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
 
assert_pipe_disabled(dev_priv,
to_intel_crtc(crtc)->pipe);
 
dpa_ctl = I915_READ(DP_A);
WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
"dp pll off, should be on\n");
WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
 
/* We can't rely on the value tracked for the DP register in
* intel_dp->DP because link_down must not change that (otherwise link
* re-training will fail). */
dpa_ctl &= ~DP_PLL_ENABLE;
I915_WRITE(DP_A, dpa_ctl);
POSTING_READ(DP_A);
1223,79 → 1247,105
}
}
 
static void intel_dp_prepare(struct drm_encoder *encoder)
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp = I915_READ(intel_dp->output_reg);
 
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
if (!(tmp & DP_PORT_EN))
return false;
 
/* Wake up the sink first */
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
*pipe = PORT_TO_PIPE_CPT(tmp);
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
*pipe = PORT_TO_PIPE(tmp);
} else {
u32 trans_sel;
u32 trans_dp;
int i;
 
/* Make sure the panel is off before trying to
* change the mode
*/
switch (intel_dp->output_reg) {
case PCH_DP_B:
trans_sel = TRANS_DP_PORT_SEL_B;
break;
case PCH_DP_C:
trans_sel = TRANS_DP_PORT_SEL_C;
break;
case PCH_DP_D:
trans_sel = TRANS_DP_PORT_SEL_D;
break;
default:
return true;
}
 
static void intel_dp_commit(struct drm_encoder *encoder)
for_each_pipe(i) {
trans_dp = I915_READ(TRANS_DP_CTL(i));
if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
*pipe = i;
return true;
}
}
}
 
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
 
return true;
}
 
static void intel_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
ironlake_edp_panel_off(intel_dp);
 
intel_dp->dpms_mode = DRM_MODE_DPMS_ON;
/* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
if (!is_cpu_edp(intel_dp))
intel_dp_link_down(intel_dp);
}
 
if (HAS_PCH_CPT(dev))
intel_cpt_verify_modeset(dev, intel_crtc->pipe);
static void intel_post_disable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
if (is_cpu_edp(intel_dp)) {
intel_dp_link_down(intel_dp);
ironlake_edp_pll_off(intel_dp);
}
}
 
static void
intel_dp_dpms(struct drm_encoder *encoder, int mode)
static void intel_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
if (mode != DRM_MODE_DPMS_ON) {
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
 
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
 
if (is_cpu_edp(intel_dp))
ironlake_edp_pll_off(encoder);
} else {
if (is_cpu_edp(intel_dp))
ironlake_edp_pll_on(encoder);
 
ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, true);
intel_dp_complete_link_train(intel_dp);
} else
ironlake_edp_panel_vdd_off(intel_dp, false);
ironlake_edp_backlight_on(intel_dp);
}
intel_dp->dpms_mode = mode;
 
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
if (is_cpu_edp(intel_dp))
ironlake_edp_pll_on(intel_dp);
}
 
/*
1614,6 → 1664,45
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
 
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
break;
case DP_TRAINING_PATTERN_1:
dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
break;
case DP_TRAINING_PATTERN_2:
dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
 
} else {
dp_reg_value &= ~DP_LINK_TRAIN_MASK;
 
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
dp_reg_value |= DP_LINK_TRAIN_OFF;
break;
case DP_TRAINING_PATTERN_1:
dp_reg_value |= DP_LINK_TRAIN_PAT_1;
break;
case DP_TRAINING_PATTERN_2:
dp_reg_value |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
DRM_ERROR("DP training pattern 3 not supported\n");
dp_reg_value |= DP_LINK_TRAIN_PAT_2;
break;
}
}
 
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
 
1621,6 → 1710,8
DP_TRAINING_PATTERN_SET,
dp_train_pat);
 
if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
DP_TRAINING_PATTERN_DISABLE) {
ret = intel_dp_aux_native_write(intel_dp,
DP_TRAINING_LANE0_SET,
intel_dp->train_set,
1627,6 → 1718,7
intel_dp->lane_count);
if (ret != intel_dp->lane_count)
return false;
}
 
return true;
}
1636,26 → 1728,12
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
int i;
uint8_t voltage;
bool clock_recovery = false;
int voltage_tries, loop_tries;
u32 reg;
uint32_t DP = intel_dp->DP;
 
/*
* On CPT we have to enable the port in training pattern 1, which
* will happen below in intel_dp_set_link_train. Otherwise, enable
* the port and wait for it to become active.
*/
if (!HAS_PCH_CPT(dev)) {
I915_WRITE(intel_dp->output_reg, intel_dp->DP);
POSTING_READ(intel_dp->output_reg);
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
 
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
1663,10 → 1741,6
 
DP |= DP_PORT_EN;
 
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
memset(intel_dp->train_set, 0, 4);
voltage = 0xff;
voltage_tries = 0;
1690,12 → 1764,7
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
 
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
 
if (!intel_dp_set_link_train(intel_dp, reg,
if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE))
break;
1717,7 → 1786,7
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count) {
if (i == intel_dp->lane_count && voltage_tries == 5) {
++loop_tries;
if (loop_tries == 5) {
DRM_DEBUG_KMS("too many full retries, give up\n");
1750,10 → 1819,8
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool channel_eq = false;
int tries, cr_tries;
u32 reg;
uint32_t DP = intel_dp->DP;
 
/* channel equalization */
1782,13 → 1849,8
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
 
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
 
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, reg,
if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_2 |
DP_LINK_SCRAMBLING_DISABLE))
break;
1823,15 → 1885,7
++tries;
}
 
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
 
I915_WRITE(intel_dp->output_reg, reg);
POSTING_READ(intel_dp->output_reg);
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
 
static void
1841,18 → 1895,11
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
 
if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
return;
 
DRM_DEBUG_KMS("\n");
 
if (is_edp(intel_dp)) {
DP &= ~DP_PLL_ENABLE;
I915_WRITE(intel_dp->output_reg, DP);
POSTING_READ(intel_dp->output_reg);
udelay(100);
}
 
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
1864,14 → 1911,7
 
msleep(17);
 
if (is_edp(intel_dp)) {
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP |= DP_LINK_TRAIN_OFF_CPT;
else
DP |= DP_LINK_TRAIN_OFF;
}
 
if (!HAS_PCH_CPT(dev) &&
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
 
1914,12 → 1954,46
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) &&
(intel_dp->dpcd[DP_DPCD_REV] != 0)) {
sizeof(intel_dp->dpcd)) == 0)
return false; /* aux transfer failed */
 
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */
 
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
 
if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
return true; /* no per-port downstream info */
 
if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
intel_dp->downstream_ports,
DP_MAX_DOWNSTREAM_PORTS) == 0)
return false; /* downstream port status fetch failed */
 
return true;
}
 
return false;
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
u8 buf[3];
 
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
 
ironlake_edp_panel_vdd_on(intel_dp);
 
if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
 
if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
 
ironlake_edp_panel_vdd_off(intel_dp, false);
}
 
static bool
1958,10 → 2032,10
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
 
if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
if (!intel_dp->base.connectors_active)
return;
 
if (!intel_dp->base.base.crtc)
if (WARN_ON(!intel_dp->base.base.crtc))
return;
 
/* Try to read receiver status if the link appears to be up */
1998,11 → 2072,43
}
}
 
/* XXX this is probably wrong for multiple downstream ports */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
if (intel_dp_get_dpcd(intel_dp))
uint8_t *dpcd = intel_dp->dpcd;
bool hpd;
uint8_t type;
 
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
 
/* if there's no downstream port, we're done */
if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
return connector_status_connected;
 
/* If we're HPD-aware, SINK_COUNT changes dynamically */
hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
if (hpd) {
uint8_t reg;
if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
&reg, 1))
return connector_status_unknown;
return DP_GET_SINK_COUNT(reg) ? connector_status_connected
: connector_status_disconnected;
}
 
/* If no HPD, poke DDC gently */
if (drm_probe_ddc(&intel_dp->adapter))
return connector_status_connected;
 
/* Well we tried, say unknown for unreliable port types */
type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
return connector_status_unknown;
 
/* Anything else is out of spec, warn and ignore */
DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
return connector_status_disconnected;
}
 
2027,25 → 2133,23
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t temp, bit;
uint32_t bit;
 
switch (intel_dp->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_INT_STATUS;
bit = DPB_HOTPLUG_LIVE_STATUS;
break;
case DP_C:
bit = DPC_HOTPLUG_INT_STATUS;
bit = DPC_HOTPLUG_LIVE_STATUS;
break;
case DP_D:
bit = DPD_HOTPLUG_INT_STATUS;
bit = DPD_HOTPLUG_LIVE_STATUS;
break;
default:
return connector_status_unknown;
}
 
temp = I915_READ(PORT_HOTPLUG_STAT);
 
if ((temp & bit) == 0)
if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
return connector_status_disconnected;
 
return intel_dp_detect_dpcd(intel_dp);
2056,10 → 2160,22
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct edid *edid;
int size;
 
ironlake_edp_panel_vdd_on(intel_dp);
if (is_edp(intel_dp)) {
if (!intel_dp->edid)
return NULL;
 
size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
edid = kmalloc(size, GFP_KERNEL);
if (!edid)
return NULL;
 
memcpy(edid, intel_dp->edid, size);
return edid;
}
 
edid = drm_get_edid(connector, adapter);
ironlake_edp_panel_vdd_off(intel_dp, false);
return edid;
}
 
2069,9 → 2185,16
struct intel_dp *intel_dp = intel_attached_dp(connector);
int ret;
 
ironlake_edp_panel_vdd_on(intel_dp);
if (is_edp(intel_dp)) {
drm_mode_connector_update_edid_property(connector,
intel_dp->edid);
ret = drm_add_edid_modes(connector, intel_dp->edid);
drm_edid_to_eld(connector,
intel_dp->edid);
return intel_dp->edid_mode_count;
}
 
ret = intel_ddc_get_modes(connector, adapter);
ironlake_edp_panel_vdd_off(intel_dp, false);
return ret;
}
 
2104,14 → 2227,16
 
if (status != connector_status_connected)
return status;
 
intel_dp_probe_oui(intel_dp);
 
/*
if (intel_dp->force_audio) {
intel_dp->has_audio = intel_dp->force_audio > 0;
} else {
edid = drm_get_edid(connector, &intel_dp->adapter);
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
intel_dp->has_audio = drm_detect_monitor_audio(edid);
connector->display_info.raw_edid = NULL;
kfree(edid);
}
}
2192,10 → 2317,10
 
intel_dp->force_audio = i;
 
if (i == 0)
if (i == HDMI_AUDIO_AUTO)
has_audio = intel_dp_detect_audio(connector);
else
has_audio = i > 0;
has_audio = (i == HDMI_AUDIO_ON);
 
if (has_audio == intel_dp->has_audio)
return 0;
2218,9 → 2343,8
done:
if (intel_dp->base.base.crtc) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y,
crtc->fb);
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
 
return 0;
2230,8 → 2354,9
intel_dp_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_dp *intel_dp = intel_attached_dp(connector);
 
if (intel_dpd_is_edp(dev))
if (is_edp(intel_dp))
intel_panel_destroy_backlight(dev);
 
drm_sysfs_connector_remove(connector);
2253,15 → 2378,13
}
 
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.dpms = intel_dp_dpms,
.mode_fixup = intel_dp_mode_fixup,
.prepare = intel_dp_prepare,
.mode_set = intel_dp_mode_set,
.commit = intel_dp_commit,
.disable = intel_encoder_noop,
};
 
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.dpms = intel_connector_dpms,
.detect = intel_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_dp_set_property,
2291,16 → 2414,11
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
struct intel_encoder *encoder;
 
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
struct intel_dp *intel_dp;
for_each_encoder_on_crtc(dev, crtc, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 
if (encoder->crtc != crtc)
continue;
 
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
intel_dp->base.type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
2337,7 → 2455,7
}
 
void
intel_dp_init(struct drm_device *dev, int output_reg)
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
2352,7 → 2470,9
return;
 
intel_dp->output_reg = output_reg;
intel_dp->dpms_mode = -1;
intel_dp->port = port;
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
 
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
2379,20 → 2499,13
 
connector->polled = DRM_CONNECTOR_POLL_HPD;
 
if (output_reg == DP_B || output_reg == PCH_DP_B)
intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
else if (output_reg == DP_C || output_reg == PCH_DP_C)
intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
else if (output_reg == DP_D || output_reg == PCH_DP_D)
intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
intel_encoder->cloneable = false;
 
if (is_edp(intel_dp)) {
intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
// INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
// ironlake_panel_vdd_work);
}
 
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
 
2403,34 → 2516,37
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
 
intel_encoder->enable = intel_enable_dp;
intel_encoder->pre_enable = intel_pre_enable_dp;
intel_encoder->disable = intel_disable_dp;
intel_encoder->post_disable = intel_post_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
 
/* Set up the DDC bus. */
switch (output_reg) {
case DP_A:
switch (port) {
case PORT_A:
name = "DPDDC-A";
break;
case DP_B:
case PCH_DP_B:
dev_priv->hotplug_supported_mask |=
HDMIB_HOTPLUG_INT_STATUS;
case PORT_B:
dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case DP_C:
case PCH_DP_C:
dev_priv->hotplug_supported_mask |=
HDMIC_HOTPLUG_INT_STATUS;
case PORT_C:
dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case DP_D:
case PCH_DP_D:
dev_priv->hotplug_supported_mask |=
HDMID_HOTPLUG_INT_STATUS;
case PORT_D:
dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
default:
WARN(1, "Invalid port %c\n", port_name(port));
break;
}
 
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
bool ret;
struct edp_power_seq cur, vbt;
u32 pp_on, pp_off, pp_div;
 
2438,6 → 2554,13
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
pp_div = I915_READ(PCH_PP_DIVISOR);
 
if (!pp_on || !pp_off || !pp_div) {
DRM_INFO("bad panel power sequencing delays, disabling panel\n");
intel_dp_encoder_destroy(&intel_dp->base.base);
intel_dp_destroy(&intel_connector->base);
return;
}
 
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
PANEL_POWER_UP_DELAY_SHIFT;
2476,7 → 2599,14
 
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
 
intel_dp_i2c_init(intel_dp, intel_connector, name);
 
if (is_edp(intel_dp)) {
bool ret;
struct edid *edid;
 
ironlake_edp_panel_vdd_on(intel_dp);
ret = intel_dp_get_dpcd(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
2493,10 → 2623,20
intel_dp_destroy(&intel_connector->base);
return;
}
 
ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
drm_mode_connector_update_edid_property(connector,
edid);
intel_dp->edid_mode_count =
drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
intel_dp->edid = edid;
}
ironlake_edp_panel_vdd_off(intel_dp, false);
}
 
intel_dp_i2c_init(intel_dp, intel_connector, name);
 
intel_encoder->hot_plug = intel_dp_hot_plug;
 
if (is_edp(intel_dp)) {
/drivers/video/drm/i915/intel_drv.h
26,26 → 26,45
#define __INTEL_DRV_H__
 
#include <linux/i2c.h>
#include "i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include <syscall.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_helper.h>
 
#define cpu_relax() asm volatile("rep; nop")
 
#define _wait_for(COND, MS, W) ({ \
unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
unsigned long timeout__ = GetTimerTicks() + msecs_to_jiffies(MS); \
int ret__ = 0; \
while (!(COND)) { \
if (time_after(jiffies, timeout__)) { \
if (time_after(GetTimerTicks(), timeout__)) { \
ret__ = -ETIMEDOUT; \
break; \
} \
if (W) msleep(W); \
if (W ) { \
msleep(W); \
} else { \
cpu_relax(); \
} \
} \
ret__; \
})
 
#define wait_for_atomic_us(COND, US) ({ \
unsigned long timeout__ = GetTimerTicks() + usecs_to_jiffies(US); \
int ret__ = 0; \
while (!(COND)) { \
if (time_after(GetTimerTicks(), timeout__)) { \
ret__ = -ETIMEDOUT; \
break; \
} \
cpu_relax(); \
} \
ret__; \
})
 
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
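 
/* A minimal usage sketch (not from the original source) of these polling
 * helpers: wait_for() msleep()s between polls and so is meant for process
 * context, while wait_for_atomic()/wait_for_atomic_us() spin with
 * cpu_relax(). The register and bit names below are hypothetical:
 *
 *   if (wait_for((I915_READ(SOME_STATUS_REG) & SOME_READY_BIT) != 0, 50))
 *       DRM_ERROR("timed out waiting for ready bit\n");
 *
 * The macros evaluate to 0 if COND became true within the timeout and to
 * -ETIMEDOUT otherwise.
 */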
 
113,6 → 132,10
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
#define INTEL_MODE_DP_FORCE_6BPC (0x10)
/* This flag must be set by the encoder's mode_fixup if it changes the crtc
 * timings in the mode, to prevent the crtc fixup from overwriting them.
 * Currently only lvds needs this. */
#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
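 
/* A minimal sketch (assumption: like the other INTEL_MODE_* flags above,
 * this one is carried in the mode's private_flags) of how an encoder's
 * mode_fixup would use it:
 *
 *   // after filling in its own crtc_* timings in adjusted_mode:
 *   adjusted_mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
 */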
 
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
142,16 → 165,48
 
struct intel_encoder {
struct drm_encoder base;
/*
* The new crtc this encoder will be driven from. Only differs from
* base->crtc while a modeset is in progress.
*/
struct intel_crtc *new_crtc;
 
int type;
bool needs_tv_clock;
/*
* Intel hw has only one MUX where encoders could be cloned, hence a
* simple flag is enough to compute the possible_clones mask.
*/
bool cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
void (*pre_enable)(struct intel_encoder *);
void (*enable)(struct intel_encoder *);
void (*disable)(struct intel_encoder *);
void (*post_disable)(struct intel_encoder *);
/* Read out the current hw state of this encoder, returning true if
* the encoder is active. If the encoder is enabled it also sets the pipe
* it is connected to in the pipe parameter. */
bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe);
int crtc_mask;
int clone_mask;
};
 
struct intel_connector {
struct drm_connector base;
/*
* The fixed encoder this connector is connected to.
*/
struct intel_encoder *encoder;
 
/*
* The new encoder this connector will be driven by. Only differs from
* encoder while a modeset is in progress.
*/
struct intel_encoder *new_encoder;
 
/* Reads out the current hw state, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
};
 
struct intel_crtc {
159,15 → 214,23
enum pipe pipe;
enum plane plane;
u8 lut_r[256], lut_g[256], lut_b[256];
int dpms_mode;
bool active; /* is the crtc on? independent of the dpms mode */
bool busy; /* is scanout buffer being updated frequently? */
struct timer_list idle_timer;
/*
* Whether the crtc and the connected output pipeline are active. Implies
* that crtc->enabled is set, i.e. the current mode configuration has
* some outputs connected to this crtc.
*/
bool active;
bool primary_disabled; /* is the crtc obscured by a plane? */
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
int fdi_lanes;
 
/* Display surface base address adjustment for pageflips. Note that on
* gen4+ this only adjusts up to a tile; offsets within a tile are
* handled in the hw itself (with the TILEOFF register). */
unsigned long dspaddr_offset;
 
struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
175,8 → 238,8
bool cursor_visible;
unsigned int bpp;
 
bool no_pll; /* tertiary pipe for IVB */
bool use_pll_a;
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
};
 
struct intel_plane {
183,7 → 246,6
struct drm_plane base;
enum pipe pipe;
struct drm_i915_gem_object *obj;
bool primary_disabled;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
void (*update_plane)(struct drm_plane *plane,
200,6 → 262,25
struct drm_intel_sprite_colorkey *key);
};
 
struct intel_watermark_params {
unsigned long fifo_size;
unsigned long max_wm;
unsigned long default_wm;
unsigned long guard_size;
unsigned long cacheline_size;
};
 
struct cxsr_latency {
int is_desktop;
int is_ddr3;
unsigned long fsb_freq;
unsigned long mem_freq;
unsigned long display_sr;
unsigned long display_hpll_disable;
unsigned long cursor_sr;
unsigned long cursor_hpll_disable;
};
 
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
211,6 → 292,8
#define DIP_TYPE_AVI 0x82
#define DIP_VERSION_AVI 0x2
#define DIP_LEN_AVI 13
#define DIP_AVI_PR_1 0
#define DIP_AVI_PR_2 1
 
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
244,23 → 327,71
uint8_t ITC_EC_Q_SC;
/* PB4 - VIC 6:0 */
uint8_t VIC;
/* PB5 - PR 3:0 */
uint8_t PR;
/* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
uint8_t YQ_CN_PR;
/* PB6 to PB13 */
uint16_t top_bar_end;
uint16_t bottom_bar_start;
uint16_t left_bar_end;
uint16_t right_bar_start;
} avi;
} __attribute__ ((packed)) avi;
struct {
uint8_t vn[8];
uint8_t pd[16];
uint8_t sdi;
} spd;
} __attribute__ ((packed)) spd;
uint8_t payload[27];
} __attribute__ ((packed)) body;
} __attribute__((packed));
 
struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
int ddi_port;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
enum hdmi_force_audio force_audio;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
void (*set_infoframes)(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
};
 
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_MAX_DOWNSTREAM_PORTS 0x10
#define DP_LINK_CONFIGURATION_SIZE 9
 
struct intel_dp {
struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
enum port port;
uint32_t color_range;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
bool is_pch_edp;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
struct edid *edid; /* cached EDID for eDP */
int edid_mode_count;
};
 
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
292,29 → 423,40
int interval;
};
 
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
 
extern void intel_attach_force_audio_property(struct drm_connector *connector);
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
 
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_hdmi_init(struct drm_device *dev,
int sdvox_reg, enum port port);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev,
struct drm_i915_gem_object *obj);
extern void intel_mark_busy(struct drm_device *dev);
extern void intel_mark_idle(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int dp_reg);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern int intel_edp_target_clock(struct intel_encoder *,
struct drm_display_mode *mode);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane);
 
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
321,22 → 463,39
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern u32 intel_panel_get_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern int intel_panel_setup_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev);
extern void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe);
extern void intel_panel_disable_backlight(struct drm_device *dev);
extern void intel_panel_destroy_backlight(struct drm_device *dev);
extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
 
struct intel_set_config {
struct drm_encoder **save_connector_encoders;
struct drm_crtc **save_encoder_crtcs;
 
bool fb_changed;
bool mode_changed;
};
 
extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
extern void intel_modeset_disable(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_encoder_prepare(struct drm_encoder *encoder);
extern void intel_encoder_commit(struct drm_encoder *encoder);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
extern void intel_encoder_noop(struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
extern void intel_connector_dpms(struct drm_connector *, int mode);
extern bool intel_connector_get_hw_state(struct intel_connector *connector);
extern void intel_modeset_check_state(struct drm_device *dev);
 
 
static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
{
return to_intel_connector(connector)->encoder;
358,12 → 517,10
bool load_detect_temp;
int dpms_mode;
};
extern bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old);
extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
struct drm_connector *connector,
extern void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
 
extern void intelfb_restore(void);
372,16 → 529,11
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_enable_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
extern void gen6_disable_rps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev);
 
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct intel_ring_buffer *pipelined);
extern void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
 
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
389,7 → 541,7
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
 
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
414,12 → 566,17
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
extern void intel_prepare_ddi(struct drm_device *dev);
extern void hsw_fdi_link_train(struct drm_crtc *crtc);
extern void intel_ddi_init(struct drm_device *dev, enum port port);
 
/* For use by IVB LP watermark workaround in intel_sprite.c */
extern void sandybridge_update_wm(struct drm_device *dev);
extern void intel_update_watermarks(struct drm_device *dev);
extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width,
int pixel_size);
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
 
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
426,4 → 583,30
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
 
extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
 
/* Power-related functions, located in intel_pm.c */
extern void intel_init_pm(struct drm_device *dev);
/* FBC */
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern void intel_update_fbc(struct drm_device *dev);
/* IPS */
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
 
extern void intel_init_power_wells(struct drm_device *dev);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
 
extern void intel_enable_ddi(struct intel_encoder *encoder);
extern void intel_disable_ddi(struct intel_encoder *encoder);
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe);
extern void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
 
#endif /* __INTEL_DRV_H__ */
/drivers/video/drm/i915/intel_dvo.c
26,11 → 26,10
*/
#include <linux/i2c.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
#include "i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "dvo.h"
 
37,6 → 36,7
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
#define TFP410_ADDR 0x38
#define NS2501_ADDR 0x38
 
static const struct intel_dvo_device intel_dvo_devices[] = {
{
74,6 → 74,13
.slave_addr = 0x75,
.gpio = GMBUS_PORT_DPB,
.dev_ops = &ch7017_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ns2501",
.dvo_reg = DVOC,
.slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
}
};
 
97,22 → 104,91
struct intel_dvo, base);
}
 
static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
 
return intel_dvo->dev.dev_ops->get_hw_state(&intel_dvo->dev);
}
 
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
u32 tmp;
 
tmp = I915_READ(intel_dvo->dev.dvo_reg);
 
if (!(tmp & DVO_ENABLE))
return false;
 
*pipe = PORT_TO_PIPE(tmp);
 
return true;
}
 
static void intel_disable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
 
if (mode == DRM_MODE_DPMS_ON) {
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
I915_READ(dvo_reg);
}
 
static void intel_enable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
 
I915_WRITE(dvo_reg, temp | DVO_ENABLE);
I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
 
static void intel_dvo_dpms(struct drm_connector *connector, int mode)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
struct drm_crtc *crtc;
 
/* dvo supports only 2 dpms states. */
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
 
if (mode == connector->dpms)
return;
 
connector->dpms = mode;
 
/* Only need to change hw state when actually enabled */
crtc = intel_dvo->base.base.crtc;
if (!crtc) {
intel_dvo->base.connectors_active = false;
return;
}
 
if (mode == DRM_MODE_DPMS_ON) {
intel_dvo->base.connectors_active = true;
 
intel_crtc_update_dpms(crtc);
 
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
} else {
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, mode);
I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
I915_READ(dvo_reg);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
 
intel_dvo->base.connectors_active = false;
 
intel_crtc_update_dpms(crtc);
}
 
intel_modeset_check_state(connector->dev);
}
 
static int intel_dvo_mode_valid(struct drm_connector *connector,
136,7 → 212,7
}
 
static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
157,7 → 233,6
C(vsync_end);
C(vtotal);
C(clock);
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
#undef C
}
 
244,7 → 319,7
* that's not the case.
*/
intel_ddc_get_modes(connector,
&dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPC));
if (!list_empty(&connector->probed_modes))
return 1;
 
268,15 → 343,13
}
 
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.dpms = intel_dvo_dpms,
.mode_fixup = intel_dvo_mode_fixup,
.prepare = intel_encoder_prepare,
.mode_set = intel_dvo_mode_set,
.commit = intel_encoder_commit,
.disable = intel_encoder_noop,
};
 
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.dpms = intel_dvo_dpms,
.detect = intel_dvo_detect,
.destroy = intel_dvo_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
365,6 → 438,11
drm_encoder_init(dev, &intel_encoder->base,
&intel_dvo_enc_funcs, encoder_type);
 
intel_encoder->disable = intel_disable_dvo;
intel_encoder->enable = intel_enable_dvo;
intel_encoder->get_hw_state = intel_dvo_get_hw_state;
intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
 
/* Now, try to find a controller */
for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
struct drm_connector *connector = &intel_connector->base;
376,7 → 454,7
* special cases, but otherwise default to what's defined
* in the spec.
*/
if (dvo->gpio != 0)
if (intel_gmbus_is_port_valid(dvo->gpio))
gpio = dvo->gpio;
else if (dvo->type == INTEL_DVO_CHIP_LVDS)
gpio = GMBUS_PORT_SSC;
387,7 → 465,7
* It appears that everything is on GPIOE except for panels
* on i830 laptops, which are on GPIOB (DVOA).
*/
i2c = &dev_priv->gmbus[gpio].adapter;
i2c = intel_gmbus_get_adapter(dev_priv, gpio);
 
intel_dvo->dev = *dvo;
if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
397,9 → 475,7
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
switch (dvo->type) {
case INTEL_DVO_CHIP_TMDS:
intel_encoder->clone_mask =
(1 << INTEL_DVO_TMDS_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
intel_encoder->cloneable = true;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
406,8 → 482,7
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
case INTEL_DVO_CHIP_LVDS:
intel_encoder->clone_mask =
(1 << INTEL_DVO_LVDS_CLONE_BIT);
intel_encoder->cloneable = false;
drm_connector_init(dev, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
/drivers/video/drm/i915/intel_fb.c
36,12 → 36,11
//#include <linux/init.h>
//#include <linux/vga_switcheroo.h>
 
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_fb_helper.h"
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include "i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
 
73,7 → 72,7
 
 
static struct fb_ops intelfb_ops = {
// .owner = THIS_MODULE,
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
// .fb_fillrect = cfb_fillrect,
93,7 → 92,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct drm_i915_gem_object *obj;
struct device *device = &dev->pdev->dev;
int size, ret;
183,6 → 182,7
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;
 
info->screen_base = 0xFE000000;
info->screen_size = size;
 
// memset(info->screen_base, 0, size);
190,6 → 190,8
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
fb->width, fb->height,
obj->gtt_offset, obj);
197,6 → 199,7
 
mutex_unlock(&dev->struct_mutex);
// vga_switcheroo_client_fb_set(dev->pdev, info);
 
return 0;
 
out_unpin:
/drivers/video/drm/i915/intel_hdmi.c
28,28 → 28,28
 
#include <linux/i2c.h>
#include <linux/slab.h>
//#include <linux/delay.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_edid.h"
#include <linux/delay.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
int force_audio;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
};
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t enabled_bits;
 
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
 
WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
}
 
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_hdmi, base.base);
}
75,121 → 75,255
frame->checksum = 0x100 - sum;
}
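 
/* For reference, the rule implemented above is the usual infoframe
 * checksum: all bytes of the frame, including the checksum byte itself,
 * must sum to zero modulo 256. A sketch of the full computation, assuming
 * data points at the packed frame:
 *
 *   uint8_t sum = 0;
 *   for (i = 0; i < DIP_HEADER_SIZE + frame->len; i++)
 *       sum += data[i];
 *   frame->checksum = 0x100 - sum;   // makes the byte total wrap to 0
 */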
 
static u32 intel_infoframe_index(struct dip_infoframe *frame)
static u32 g4x_infoframe_index(struct dip_infoframe *frame)
{
u32 flags = 0;
 
switch (frame->type) {
case DIP_TYPE_AVI:
flags |= VIDEO_DIP_SELECT_AVI;
break;
return VIDEO_DIP_SELECT_AVI;
case DIP_TYPE_SPD:
flags |= VIDEO_DIP_SELECT_SPD;
break;
return VIDEO_DIP_SELECT_SPD;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
break;
return 0;
}
}
 
return flags;
static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
{
switch (frame->type) {
case DIP_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI;
case DIP_TYPE_SPD:
return VIDEO_DIP_ENABLE_SPD;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
return 0;
}
}
 
static u32 intel_infoframe_flags(struct dip_infoframe *frame)
static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
{
u32 flags = 0;
 
switch (frame->type) {
case DIP_TYPE_AVI:
flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
break;
return VIDEO_DIP_ENABLE_AVI_HSW;
case DIP_TYPE_SPD:
flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
break;
return VIDEO_DIP_ENABLE_SPD_HSW;
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
break;
return 0;
}
}
 
return flags;
static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame, enum pipe pipe)
{
switch (frame->type) {
case DIP_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(pipe);
case DIP_TYPE_SPD:
return HSW_TVIDEO_DIP_SPD_DATA(pipe);
default:
DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
return 0;
}
}
 
static void i9xx_write_infoframe(struct drm_encoder *encoder,
static void g4x_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 port, flags, val = I915_READ(VIDEO_DIP_CTL);
u32 val = I915_READ(VIDEO_DIP_CTL);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
 
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
/* XXX first guess at handling video port, is this correct? */
if (intel_hdmi->sdvox_reg == SDVOB)
port = VIDEO_DIP_PORT_B;
else if (intel_hdmi->sdvox_reg == SDVOC)
port = VIDEO_DIP_PORT_C;
else
return;
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
 
flags = intel_infoframe_index(frame);
val &= ~g4x_infoframe_enable(frame);
 
val &= ~VIDEO_DIP_SELECT_MASK;
I915_WRITE(VIDEO_DIP_CTL, val);
 
I915_WRITE(VIDEO_DIP_CTL, val | port | flags);
 
mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(VIDEO_DIP_DATA, 0);
mmiowb();
 
flags |= intel_infoframe_flags(frame);
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
 
I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
I915_WRITE(VIDEO_DIP_CTL, val);
POSTING_READ(VIDEO_DIP_CTL);
}
 
static void ironlake_write_infoframe(struct drm_encoder *encoder,
static void ibx_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 flags, val = I915_READ(reg);
u32 val = I915_READ(reg);
 
intel_wait_for_vblank(dev, intel_crtc->pipe);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
flags = intel_infoframe_index(frame);
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
 
val &= ~g4x_infoframe_enable(frame);
 
I915_WRITE(reg, val);
 
mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
 
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
 
I915_WRITE(reg, val);
POSTING_READ(reg);
}
 
static void cpt_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
 
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
 
I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
/* The DIP control register spec says that we need to update the AVI
* infoframe without clearing its enable bit */
if (frame->type != DIP_TYPE_AVI)
val &= ~g4x_infoframe_enable(frame);
 
I915_WRITE(reg, val);
 
mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
 
flags |= intel_infoframe_flags(frame);
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
 
I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
I915_WRITE(reg, val);
POSTING_READ(reg);
}
 
static void vlv_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
unsigned i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(reg);
 
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
 
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(frame);
 
val &= ~g4x_infoframe_enable(frame);
 
I915_WRITE(reg, val);
 
mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
 
val |= g4x_infoframe_enable(frame);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
 
I915_WRITE(reg, val);
POSTING_READ(reg);
}
 
static void hsw_write_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->pipe);
unsigned int i, len = DIP_HEADER_SIZE + frame->len;
u32 val = I915_READ(ctl_reg);
 
if (data_reg == 0)
return;
 
val &= ~hsw_infoframe_enable(frame);
I915_WRITE(ctl_reg, val);
 
mmiowb();
for (i = 0; i < len; i += 4) {
I915_WRITE(data_reg + i, *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
I915_WRITE(data_reg + i, 0);
mmiowb();
 
val |= hsw_infoframe_enable(frame);
I915_WRITE(ctl_reg, val);
POSTING_READ(ctl_reg);
}
 
static void intel_set_infoframe(struct drm_encoder *encoder,
struct dip_infoframe *frame)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
if (!intel_hdmi->has_hdmi_sink)
return;
 
intel_dip_infoframe_csum(frame);
intel_hdmi->write_infoframe(encoder, frame);
}
 
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
197,6 → 331,9
.len = DIP_LEN_AVI,
};
 
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
 
intel_set_infoframe(encoder, &avi_if);
}
 
215,6 → 352,225
intel_set_infoframe(encoder, &spd_if);
}
 
static void g4x_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
u32 port;
 
assert_hdmi_port_disabled(intel_hdmi);
 
/* If the registers were not initialized yet, they might be zeroes,
* which means we're selecting the AVI DIP with its frequency set to
* "once". This seems to really confuse the HW and makes things stop
* working (the register spec says the AVI always needs to be sent
* every VSync). So here we avoid writing to the register more than we
* need to, and also explicitly select the AVI DIP and explicitly set
* its frequency to every VSync. Avoiding the double write seems to be
* enough to solve the problem, but being defensive shouldn't hurt us
* either. */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
 
if (!intel_hdmi->has_hdmi_sink) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
 
switch (intel_hdmi->sdvox_reg) {
case SDVOB:
port = VIDEO_DIP_PORT_B;
break;
case SDVOC:
port = VIDEO_DIP_PORT_C;
break;
default:
BUG();
return;
}
 
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
}
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
 
val |= VIDEO_DIP_ENABLE;
val &= ~VIDEO_DIP_ENABLE_VENDOR;
 
I915_WRITE(reg, val);
POSTING_READ(reg);
 
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
 
static void ibx_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port;
 
assert_hdmi_port_disabled(intel_hdmi);
 
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
 
if (!intel_hdmi->has_hdmi_sink) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
 
switch (intel_hdmi->sdvox_reg) {
case HDMIB:
port = VIDEO_DIP_PORT_B;
break;
case HDMIC:
port = VIDEO_DIP_PORT_C;
break;
case HDMID:
port = VIDEO_DIP_PORT_D;
break;
default:
BUG();
return;
}
 
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
}
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
 
val |= VIDEO_DIP_ENABLE;
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_GCP);
 
I915_WRITE(reg, val);
POSTING_READ(reg);
 
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
 
static void cpt_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
 
assert_hdmi_port_disabled(intel_hdmi);
 
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
 
if (!intel_hdmi->has_hdmi_sink) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI);
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
 
/* Set both together, unset both together: see the spec. */
val |= VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI;
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_GCP);
 
I915_WRITE(reg, val);
POSTING_READ(reg);
 
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
 
static void vlv_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
 
assert_hdmi_port_disabled(intel_hdmi);
 
/* See the big comment in g4x_set_infoframes() */
val |= VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC;
 
if (!intel_hdmi->has_hdmi_sink) {
if (!(val & VIDEO_DIP_ENABLE))
return;
val &= ~VIDEO_DIP_ENABLE;
I915_WRITE(reg, val);
POSTING_READ(reg);
return;
}
 
val |= VIDEO_DIP_ENABLE;
val &= ~(VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_GCP);
 
I915_WRITE(reg, val);
POSTING_READ(reg);
 
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
 
static void hsw_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 reg = HSW_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
 
assert_hdmi_port_disabled(intel_hdmi);
 
if (!intel_hdmi->has_hdmi_sink) {
I915_WRITE(reg, 0);
POSTING_READ(reg);
return;
}
 
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW);
 
I915_WRITE(reg, val);
POSTING_READ(reg);
 
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
}
 
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
221,12 → 577,11
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 sdvox;
 
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
sdvox = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev))
sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
253,21 → 608,41
 
if (HAS_PCH_CPT(dev))
sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 1)
else if (intel_crtc->pipe == PIPE_B)
sdvox |= SDVO_PIPE_B_SELECT;
 
I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
POSTING_READ(intel_hdmi->sdvox_reg);
 
intel_hdmi_set_avi_infoframe(encoder);
intel_hdmi_set_spd_infoframe(encoder);
intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
 
static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->dev;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 tmp;
 
tmp = I915_READ(intel_hdmi->sdvox_reg);
 
if (!(tmp & SDVO_ENABLE))
return false;
 
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
 
return true;
}
 
static void intel_enable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
u32 enable_bits = SDVO_ENABLE;
 
276,6 → 651,17
 
temp = I915_READ(intel_hdmi->sdvox_reg);
 
/* HW workaround for IBX: we need to move the port to transcoder A
* before disabling it. */
if (HAS_PCH_IBX(dev)) {
struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
 
/* Restore the transcoder select bit. */
if (pipe == PIPE_B)
enable_bits |= SDVO_PIPE_B_SELECT;
}
 
/* HW workaround: the enable bit needs to be toggled off and on for 12bpc,
* but we do this unconditionally, as it proved more stable in testing.
*/
284,15 → 670,67
POSTING_READ(intel_hdmi->sdvox_reg);
}
 
if (mode != DRM_MODE_DPMS_ON) {
temp &= ~enable_bits;
} else {
temp |= enable_bits;
 
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
 
/* HW workaround: this needs to be written twice, to work around an
* issue where the first write may get masked.
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
}
}
 
static void intel_disable_hdmi(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
u32 temp;
u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
 
temp = I915_READ(intel_hdmi->sdvox_reg);
 
/* HW workaround for IBX: we need to move the port to transcoder A
* before disabling it. */
if (HAS_PCH_IBX(dev)) {
struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
 
if (temp & SDVO_PIPE_B_SELECT) {
temp &= ~SDVO_PIPE_B_SELECT;
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
 
/* Again we need to write this twice. */
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
 
/* Transcoder selection bits only take
* effect on vblank. */
if (crtc)
intel_wait_for_vblank(dev, pipe);
else
msleep(50);
}
}
 
/* HW workaround: the enable bit needs to be toggled off and on for 12bpc,
* but we do this unconditionally, as it proved more stable in testing.
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(intel_hdmi->sdvox_reg, temp & ~SDVO_ENABLE);
POSTING_READ(intel_hdmi->sdvox_reg);
}
 
temp &= ~enable_bits;
 
I915_WRITE(intel_hdmi->sdvox_reg, temp);
POSTING_READ(intel_hdmi->sdvox_reg);
 
/* HW workaround: this needs to be written twice, to work around an
* issue where the first write may get masked.
*/
317,12 → 755,33
}
 
static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
 
static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
 
switch (intel_hdmi->sdvox_reg) {
case SDVOB:
bit = HDMIB_HOTPLUG_LIVE_STATUS;
break;
case SDVOC:
bit = HDMIC_HOTPLUG_LIVE_STATUS;
break;
default:
bit = 0;
break;
}
 
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
 
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
331,24 → 790,30
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
 
if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
return status;
 
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
edid = drm_get_edid(connector,
&dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
 
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
if (intel_hdmi->force_audio != HDMI_AUDIO_OFF_DVI)
intel_hdmi->has_hdmi_sink =
drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
}
connector->display_info.raw_edid = NULL;
kfree(edid);
}
 
if (status == connector_status_connected) {
if (intel_hdmi->force_audio)
intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
intel_hdmi->has_audio =
(intel_hdmi->force_audio == HDMI_AUDIO_ON);
}
 
return status;
364,7 → 829,8
*/
 
return intel_ddc_get_modes(connector,
&dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
}
 
static bool
376,12 → 842,11
bool has_audio = false;
 
edid = drm_get_edid(connector,
&dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
if (edid) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
 
connector->display_info.raw_edid = NULL;
kfree(edid);
}
 
402,7 → 867,7
return ret;
#if 0
if (property == dev_priv->force_audio_property) {
int i = val;
enum hdmi_force_audio i = val;
bool has_audio;
 
if (i == intel_hdmi->force_audio)
410,17 → 875,18
 
intel_hdmi->force_audio = i;
 
if (i == 0)
if (i == HDMI_AUDIO_AUTO)
has_audio = intel_hdmi_detect_audio(connector);
else
has_audio = i > 0;
has_audio = (i == HDMI_AUDIO_ON);
 
if (has_audio == intel_hdmi->has_audio)
return 0;
if (i == HDMI_AUDIO_OFF_DVI)
intel_hdmi->has_hdmi_sink = 0;
 
intel_hdmi->has_audio = has_audio;
goto done;
}
#endif
 
if (property == dev_priv->broadcast_rgb_property) {
if (val == !!intel_hdmi->color_range)
429,15 → 895,14
intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
goto done;
}
#endif
 
return -EINVAL;
 
done:
if (intel_hdmi->base.base.crtc) {
struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
drm_crtc_helper_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y,
crtc->fb);
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
 
return 0;
450,16 → 915,20
kfree(connector);
}
 
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_ddi_mode_set,
.disable = intel_encoder_noop,
};
 
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.dpms = intel_hdmi_dpms,
.mode_fixup = intel_hdmi_mode_fixup,
.prepare = intel_encoder_prepare,
.mode_set = intel_hdmi_mode_set,
.commit = intel_encoder_commit,
.disable = intel_encoder_noop,
};
 
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.dpms = intel_connector_dpms,
.detect = intel_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_hdmi_set_property,
483,7 → 952,7
intel_attach_broadcast_rgb_property(connector);
}
 
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
490,7 → 959,6
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_hdmi *intel_hdmi;
int i;
 
intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
if (!intel_hdmi)
514,46 → 982,67
intel_encoder->type = INTEL_OUTPUT_HDMI;
 
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 0;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
/* Set up the DDC bus. */
if (sdvox_reg == SDVOB) {
intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
intel_encoder->cloneable = false;
 
intel_hdmi->ddi_port = port;
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == SDVOC) {
intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
break;
case PORT_C:
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIB) {
intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIC) {
intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMID) {
intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
break;
case PORT_D:
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
break;
case PORT_A:
/* Internal port only for eDP. */
default:
BUG();
}
 
intel_hdmi->sdvox_reg = sdvox_reg;
 
if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = i9xx_write_infoframe;
I915_WRITE(VIDEO_DIP_CTL, 0);
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
} else if (IS_VALLEYVIEW(dev)) {
intel_hdmi->write_infoframe = vlv_write_infoframe;
intel_hdmi->set_infoframes = vlv_set_infoframes;
} else if (IS_HASWELL(dev)) {
intel_hdmi->write_infoframe = hsw_write_infoframe;
intel_hdmi->set_infoframes = hsw_set_infoframes;
} else if (HAS_PCH_IBX(dev)) {
intel_hdmi->write_infoframe = ibx_write_infoframe;
intel_hdmi->set_infoframes = ibx_set_infoframes;
} else {
intel_hdmi->write_infoframe = ironlake_write_infoframe;
for_each_pipe(i)
I915_WRITE(TVIDEO_DIP_CTL(i), 0);
intel_hdmi->write_infoframe = cpt_write_infoframe;
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
 
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
if (IS_HASWELL(dev)) {
intel_encoder->enable = intel_enable_ddi;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
drm_encoder_helper_add(&intel_encoder->base,
&intel_hdmi_helper_funcs_hsw);
} else {
intel_encoder->enable = intel_enable_hdmi;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
drm_encoder_helper_add(&intel_encoder->base,
&intel_hdmi_helper_funcs);
}
intel_connector->get_hw_state = intel_connector_get_hw_state;
 
 
intel_hdmi_add_properties(intel_hdmi, connector);
 
intel_connector_attach_encoder(intel_connector, intel_encoder);
/drivers/video/drm/i915/intel_i2c.c
28,143 → 28,30
*/
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm.h"
#include <linux/export.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
#define MSEC_PER_SEC 1000L
#define USEC_PER_MSEC 1000L
#define NSEC_PER_USEC 1000L
#define NSEC_PER_MSEC 1000000L
#define USEC_PER_SEC 1000000L
#define NSEC_PER_SEC 1000000000L
#define FSEC_PER_SEC 1000000000000000L
 
#define HZ_TO_MSEC_MUL32 0xA0000000
#define HZ_TO_MSEC_ADJ32 0x0
#define HZ_TO_MSEC_SHR32 28
#define HZ_TO_MSEC_MUL64 0xA000000000000000
#define HZ_TO_MSEC_ADJ64 0x0
#define HZ_TO_MSEC_SHR64 60
#define MSEC_TO_HZ_MUL32 0xCCCCCCCD
#define MSEC_TO_HZ_ADJ32 0x733333333
#define MSEC_TO_HZ_SHR32 35
#define MSEC_TO_HZ_MUL64 0xCCCCCCCCCCCCCCCD
#define MSEC_TO_HZ_ADJ64 0x73333333333333333
#define MSEC_TO_HZ_SHR64 67
#define HZ_TO_MSEC_NUM 10
#define HZ_TO_MSEC_DEN 1
#define MSEC_TO_HZ_NUM 1
#define MSEC_TO_HZ_DEN 10
struct gmbus_port {
const char *name;
int reg;
};
 
#define HZ_TO_USEC_MUL32 0x9C400000
#define HZ_TO_USEC_ADJ32 0x0
#define HZ_TO_USEC_SHR32 18
#define HZ_TO_USEC_MUL64 0x9C40000000000000
#define HZ_TO_USEC_ADJ64 0x0
#define HZ_TO_USEC_SHR64 50
#define USEC_TO_HZ_MUL32 0xD1B71759
#define USEC_TO_HZ_ADJ32 0x1FFF2E48E8A7
#define USEC_TO_HZ_SHR32 45
#define USEC_TO_HZ_MUL64 0xD1B71758E219652C
#define USEC_TO_HZ_ADJ64 0x1FFF2E48E8A71DE69AD4
#define USEC_TO_HZ_SHR64 77
#define HZ_TO_USEC_NUM 10000
#define HZ_TO_USEC_DEN 1
#define USEC_TO_HZ_NUM 1
#define USEC_TO_HZ_DEN 10000
static const struct gmbus_port gmbus_ports[] = {
{ "ssc", GPIOB },
{ "vga", GPIOA },
{ "panel", GPIOC },
{ "dpc", GPIOD },
{ "dpb", GPIOE },
{ "dpd", GPIOF },
};
 
unsigned int inline jiffies_to_usecs(const unsigned long j)
{
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
return (USEC_PER_SEC / HZ) * j;
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
 
/*
* When we convert to jiffies then we interpret incoming values
* the following way:
*
* - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
*
* - 'too large' values [that would result in larger than
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
*
* - all other values are converted to jiffies by either multiplying
* the input value by a factor or dividing it with a factor
*
* We must also be careful about 32-bit overflows.
*/
unsigned long msecs_to_jiffies(const unsigned int m)
{
/*
* Negative value, means infinite timeout:
*/
if ((int)m < 0)
return MAX_JIFFY_OFFSET;
 
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
/*
* HZ is equal to or smaller than 1000, and 1000 is a nice
* round multiple of HZ, divide with the factor between them,
* but round upwards:
*/
return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
/*
* HZ is larger than 1000, and HZ is a nice round multiple of
* 1000 - simply multiply with the factor between them.
*
* But first make sure the multiplication result cannot
* overflow:
*/
if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
 
return m * (HZ / MSEC_PER_SEC);
#else
/*
* Generic case - multiply, round and divide. But first
* check that if we are doing a net multiplication, that
* we wouldn't overflow:
*/
if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
 
return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
>> MSEC_TO_HZ_SHR32;
#endif
}
 
unsigned long usecs_to_jiffies(const unsigned int u)
{
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
return u * (HZ / USEC_PER_SEC);
#else
return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
>> USEC_TO_HZ_SHR32;
#endif
}
 
 
 
/* Intel GPIO access functions */
 
#define I2C_RISEFALL_TIME 20
#define I2C_RISEFALL_TIME 10
 
static inline struct intel_gmbus *
to_intel_gmbus(struct i2c_adapter *i2c)
172,21 → 59,11
return container_of(i2c, struct intel_gmbus, adapter);
}
 
struct intel_gpio {
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo;
struct drm_i915_private *dev_priv;
u32 reg;
};
 
void
intel_i2c_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_GMBUS0, 0);
else
I915_WRITE(GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
}
 
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
205,15 → 82,15
I915_WRITE(DSPCLK_GATE_D, val);
}
 
static u32 get_reserved(struct intel_gpio *gpio)
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *dev_priv = gpio->dev_priv;
struct drm_i915_private *dev_priv = bus->dev_priv;
struct drm_device *dev = dev_priv->dev;
u32 reserved = 0;
 
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
reserved = I915_READ_NOTRACE(gpio->reg) &
reserved = I915_READ_NOTRACE(bus->gpio_reg) &
(GPIO_DATA_PULLUP_DISABLE |
GPIO_CLOCK_PULLUP_DISABLE);
 
222,29 → 99,29
 
static int get_clock(void *data)
{
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
I915_WRITE_NOTRACE(gpio->reg, reserved);
return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
struct intel_gmbus *bus = data;
struct drm_i915_private *dev_priv = bus->dev_priv;
u32 reserved = get_reserved(bus);
I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
}
 
static int get_data(void *data)
{
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
I915_WRITE_NOTRACE(gpio->reg, reserved);
return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
struct intel_gmbus *bus = data;
struct drm_i915_private *dev_priv = bus->dev_priv;
u32 reserved = get_reserved(bus);
I915_WRITE_NOTRACE(bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
I915_WRITE_NOTRACE(bus->gpio_reg, reserved);
return (I915_READ_NOTRACE(bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
}
 
static void set_clock(void *data, int state_high)
{
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
struct intel_gmbus *bus = data;
struct drm_i915_private *dev_priv = bus->dev_priv;
u32 reserved = get_reserved(bus);
u32 clock_bits;
 
if (state_high)
253,15 → 130,15
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
 
I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
POSTING_READ(gpio->reg);
I915_WRITE_NOTRACE(bus->gpio_reg, reserved | clock_bits);
POSTING_READ(bus->gpio_reg);
}
 
static void set_data(void *data, int state_high)
{
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
struct intel_gmbus *bus = data;
struct drm_i915_private *dev_priv = bus->dev_priv;
u32 reserved = get_reserved(bus);
u32 data_bits;
 
if (state_high)
270,124 → 147,88
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
 
I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
POSTING_READ(gpio->reg);
I915_WRITE_NOTRACE(bus->gpio_reg, reserved | data_bits);
POSTING_READ(bus->gpio_reg);
}
 
static struct i2c_adapter *
intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
{
static const int map_pin_to_reg[] = {
0,
GPIOB,
GPIOA,
GPIOC,
GPIOD,
GPIOE,
0,
GPIOF,
};
struct intel_gpio *gpio;
 
if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
return NULL;
 
gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
if (gpio == NULL)
return NULL;
 
gpio->reg = map_pin_to_reg[pin];
if (HAS_PCH_SPLIT(dev_priv->dev))
gpio->reg += PCH_GPIOA - GPIOA;
gpio->dev_priv = dev_priv;
 
snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
"i915 GPIO%c", "?BACDE?F"[pin]);
// gpio->adapter.owner = THIS_MODULE;
gpio->adapter.algo_data = &gpio->algo;
gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
gpio->algo.setsda = set_data;
gpio->algo.setscl = set_clock;
gpio->algo.getsda = get_data;
gpio->algo.getscl = get_clock;
gpio->algo.udelay = I2C_RISEFALL_TIME;
gpio->algo.timeout = usecs_to_jiffies(2200);
gpio->algo.data = gpio;
 
if (i2c_bit_add_bus(&gpio->adapter))
goto out_free;
 
return &gpio->adapter;
 
out_free:
kfree(gpio);
return NULL;
}
 
static int
intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
struct i2c_adapter *adapter,
struct i2c_msg *msgs,
int num)
intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
struct intel_gpio *gpio = container_of(adapter,
struct intel_gpio,
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
int ret;
struct drm_i915_private *dev_priv = bus->dev_priv;
 
intel_i2c_reset(dev_priv->dev);
 
intel_i2c_quirk_set(dev_priv, true);
set_data(gpio, 1);
set_clock(gpio, 1);
set_data(bus, 1);
set_clock(bus, 1);
udelay(I2C_RISEFALL_TIME);
 
ret = adapter->algo->master_xfer(adapter, msgs, num);
 
set_data(gpio, 1);
set_clock(gpio, 1);
intel_i2c_quirk_set(dev_priv, false);
 
return ret;
return 0;
}
 
static int
gmbus_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs,
int num)
static void
intel_gpio_post_xfer(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = adapter->algo_data;
int i, reg_offset;
struct drm_i915_private *dev_priv = bus->dev_priv;
 
if (bus->force_bit)
return intel_i2c_quirk_xfer(dev_priv,
bus->force_bit, msgs, num);
set_data(bus, 1);
set_clock(bus, 1);
intel_i2c_quirk_set(dev_priv, false);
}
 
reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
static void
intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
{
struct drm_i915_private *dev_priv = bus->dev_priv;
struct i2c_algo_bit_data *algo;
 
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
algo = &bus->bit_algo;
 
for (i = 0; i < num; i++) {
u16 len = msgs[i].len;
u8 *buf = msgs[i].buf;
/* -1 to map pin pair to gmbus index */
bus->gpio_reg = dev_priv->gpio_mmio_base + gmbus_ports[pin - 1].reg;
 
if (msgs[i].flags & I2C_M_RD) {
bus->adapter.algo_data = algo;
algo->setsda = set_data;
algo->setscl = set_clock;
algo->getsda = get_data;
algo->getscl = get_clock;
algo->pre_xfer = intel_gpio_pre_xfer;
algo->post_xfer = intel_gpio_post_xfer;
algo->udelay = I2C_RISEFALL_TIME;
algo->timeout = usecs_to_jiffies(2200);
algo->data = bus;
}
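 
As a worked example of the timeout just above, assuming HZ = 100 (one jiffy = 10 ms): usecs_to_jiffies(2200) takes the exact-divisor branch and returns (2200 + 10000 - 1) / 10000 = 1, so the 2.2 ms bit-banging timeout rounds up to a single tick.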
 
static int
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
u32 gmbus1_index)
{
int reg_offset = dev_priv->gpio_mmio_base;
u16 len = msg->len;
u8 *buf = msg->buf;
 
I915_WRITE(GMBUS1 + reg_offset,
GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
gmbus1_index |
GMBUS_CYCLE_WAIT |
(len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
(msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
POSTING_READ(GMBUS2+reg_offset);
do {
while (len) {
int ret;
u32 val, loop = 0;
u32 gmbus2;
 
if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
goto timeout;
if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
goto clear_err;
ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
(GMBUS_SATOER | GMBUS_HW_RDY),
50);
if (ret)
return -ETIMEDOUT;
if (gmbus2 & GMBUS_SATOER)
return -ENXIO;
 
val = I915_READ(GMBUS3 + reg_offset);
do {
394,28 → 235,34
*buf++ = val & 0xff;
val >>= 8;
} while (--len && ++loop < 4);
} while (len);
} else {
}
 
return 0;
}
 
static int
gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
{
int reg_offset = dev_priv->gpio_mmio_base;
u16 len = msg->len;
u8 *buf = msg->buf;
u32 val, loop;
 
val = loop = 0;
do {
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
while (len && loop < 4) {
val |= *buf++ << (8 * loop++);
len -= 1;
}
 
I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS1 + reg_offset,
(i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
(msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
(msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_CYCLE_WAIT |
(msg->len << GMBUS_BYTE_COUNT_SHIFT) |
(msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
POSTING_READ(GMBUS2+reg_offset);
 
while (len) {
if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
goto timeout;
if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
goto clear_err;
int ret;
u32 gmbus2;
 
val = loop = 0;
do {
423,19 → 270,149
} while (--len && ++loop < 4);
 
I915_WRITE(GMBUS3 + reg_offset, val);
POSTING_READ(GMBUS2+reg_offset);
 
ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
(GMBUS_SATOER | GMBUS_HW_RDY),
50);
if (ret)
return -ETIMEDOUT;
if (gmbus2 & GMBUS_SATOER)
return -ENXIO;
}
return 0;
}
 
if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
/*
* The gmbus controller can combine a 1- or 2-byte write with a read that
* immediately follows it by using an "INDEX" cycle.
*/
static bool
gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
{
return (i + 1 < num &&
!(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
(msgs[i + 1].flags & I2C_M_RD));
}
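 
A minimal sketch of a message pair that satisfies this test (illustrative only, not part of the patch; 0x50 is the customary DDC/EDID slave address):
 
/* One-byte register write immediately followed by a read from the same
 * slave. gmbus_is_index_read(msgs, 0, 2) returns true for this pair, so
 * gmbus_xfer_index_read() below folds both messages into a single GMBUS
 * INDEX cycle. */
u8 offset = 0;
u8 buf[128];
struct i2c_msg msgs[] = {
	{ .addr = 0x50, .flags = 0,        .len = 1,           .buf = &offset },
	{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(buf), .buf = buf },
};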
 
static int
gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
{
int reg_offset = dev_priv->gpio_mmio_base;
u32 gmbus1_index = 0;
u32 gmbus5 = 0;
int ret;
 
if (msgs[0].len == 2)
gmbus5 = GMBUS_2BYTE_INDEX_EN |
msgs[0].buf[1] | (msgs[0].buf[0] << 8);
if (msgs[0].len == 1)
gmbus1_index = GMBUS_CYCLE_INDEX |
(msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
 
/* GMBUS5 holds 16-bit index */
if (gmbus5)
I915_WRITE(GMBUS5 + reg_offset, gmbus5);
 
ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
 
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
I915_WRITE(GMBUS5 + reg_offset, 0);
 
return ret;
}
 
static int
gmbus_xfer(struct i2c_adapter *adapter,
struct i2c_msg *msgs,
int num)
{
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
int i, reg_offset;
int ret = 0;
 
mutex_lock(&dev_priv->gmbus_mutex);
 
if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
goto out;
}
 
reg_offset = dev_priv->gpio_mmio_base;
 
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
 
for (i = 0; i < num; i++) {
u32 gmbus2;
 
if (gmbus_is_index_read(msgs, i, num)) {
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
i += 1; /* set i to the index of the read xfer */
} else if (msgs[i].flags & I2C_M_RD) {
ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
} else {
ret = gmbus_xfer_write(dev_priv, &msgs[i]);
}
 
if (ret == -ETIMEDOUT)
goto timeout;
if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
if (ret == -ENXIO)
goto clear_err;
 
ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
(GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
50);
if (ret)
goto timeout;
if (gmbus2 & GMBUS_SATOER)
goto clear_err;
}
 
goto done;
/* Generate a STOP condition on the bus. Note that gmbus can't generate
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */
I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
 
/* Mark the GMBUS interface as disabled after waiting for idle.
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
10)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
adapter->name);
ret = -ETIMEDOUT;
}
I915_WRITE(GMBUS0 + reg_offset, 0);
ret = ret ?: i;
goto out;
 
clear_err:
/*
* Wait for bus to IDLE before clearing NAK.
* If we clear the NAK while bus is still active, then it will stay
* active and the next transaction may fail.
*
* If no ACK is received during the address phase of a transaction, the
* adapter must report -ENXIO. It is not clear what to return if no ACK
* is received at other times. But we have to be careful to not return
* spurious -ENXIO because that will prevent i2c and drm edid functions
* from retrying. So return -ENXIO only when gmbus properly quiesces -
* timing out seems to happen when there _is_ a ddc chip present, but
* it's slow to respond and only answers on the 2nd retry.
*/
ret = -ENXIO;
if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
10)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
adapter->name);
ret = -ETIMEDOUT;
}
 
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
442,37 → 419,32
*/
I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
I915_WRITE(GMBUS1 + reg_offset, 0);
 
done:
/* Mark the GMBUS interface as disabled. We will re-enable it at the
* start of the next xfer, till then let it sleep.
*/
I915_WRITE(GMBUS0 + reg_offset, 0);
return i;
 
DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
goto out;
 
timeout:
DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
bus->reg0 & 0xff, bus->adapter.name);
DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
bus->adapter.name, bus->reg0 & 0xff);
I915_WRITE(GMBUS0 + reg_offset, 0);
 
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
if (!bus->force_bit)
return -ENOMEM;
bus->force_bit = true;
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
 
return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
out:
mutex_unlock(&dev_priv->gmbus_mutex);
return ret;
}
 
static u32 gmbus_func(struct i2c_adapter *adapter)
{
struct intel_gmbus *bus = container_of(adapter,
struct intel_gmbus,
adapter);
 
if (bus->force_bit)
bus->force_bit->algo->functionality(bus->force_bit);
 
return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
return i2c_bit_algo.functionality(adapter) &
(I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
/* I2C_FUNC_10BIT_ADDR | */
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
489,47 → 461,41
*/
int intel_setup_gmbus(struct drm_device *dev)
{
static const char *names[GMBUS_NUM_PORTS] = {
"disabled",
"ssc",
"vga",
"panel",
"dpc",
"dpb",
"reserved",
"dpd",
};
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i;
 
dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
GFP_KERNEL);
if (dev_priv->gmbus == NULL)
return -ENOMEM;
if (HAS_PCH_SPLIT(dev))
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
else
dev_priv->gpio_mmio_base = 0;
 
mutex_init(&dev_priv->gmbus_mutex);
 
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
u32 port = i + 1; /* +1 to map gmbus index to pin pair */
 
// bus->adapter.owner = THIS_MODULE;
bus->adapter.owner = THIS_MODULE;
bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
"i915 gmbus %s",
names[i]);
gmbus_ports[i].name);
 
bus->adapter.dev.parent = &dev->pdev->dev;
bus->adapter.algo_data = dev_priv;
bus->dev_priv = dev_priv;
 
bus->adapter.algo = &gmbus_algorithm;
// ret = i2c_add_adapter(&bus->adapter);
// if (ret)
// goto err;
 
/* By default use a conservative clock rate */
bus->reg0 = i | GMBUS_RATE_100KHZ;
bus->reg0 = port | GMBUS_RATE_100KHZ;
 
/* XXX force bit banging until GMBUS is fully debugged */
bus->force_bit = intel_gpio_create(dev_priv, i);
/* gmbus seems to be broken on i830 */
if (IS_I830(dev))
bus->force_bit = true;
 
intel_gpio_setup(bus, port);
 
}
 
intel_i2c_reset(dev_priv->dev);
541,11 → 507,18
// struct intel_gmbus *bus = &dev_priv->gmbus[i];
// i2c_del_adapter(&bus->adapter);
// }
kfree(dev_priv->gmbus);
dev_priv->gmbus = NULL;
return ret;
}
 
struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
unsigned port)
{
WARN_ON(!intel_gmbus_is_port_valid(port));
/* -1 to map pin pair to gmbus index */
return (intel_gmbus_is_port_valid(port)) ?
&dev_priv->gmbus[port - 1].adapter : NULL;
}
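 
A usage sketch of this lookup, mirroring the drm_get_edid() call sites patched throughout this revision (port here stands in for any pin pair accepted by intel_gmbus_is_port_valid()):
 
struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv, port);
struct edid *edid = adapter ? drm_get_edid(connector, adapter) : NULL;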
 
void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
557,20 → 530,8
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
 
if (force_bit) {
if (bus->force_bit == NULL) {
struct drm_i915_private *dev_priv = adapter->algo_data;
bus->force_bit = intel_gpio_create(dev_priv,
bus->reg0 & 0xff);
bus->force_bit = force_bit;
}
} else {
if (bus->force_bit) {
// i2c_del_adapter(bus->force_bit);
kfree(bus->force_bit);
bus->force_bit = NULL;
}
}
}
 
void intel_teardown_gmbus(struct drm_device *dev)
{
577,18 → 538,8
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
 
if (dev_priv->gmbus == NULL)
return;
 
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
if (bus->force_bit) {
// i2c_del_adapter(bus->force_bit);
kfree(bus->force_bit);
}
// i2c_del_adapter(&bus->adapter);
}
 
kfree(dev_priv->gmbus);
dev_priv->gmbus = NULL;
}
/drivers/video/drm/i915/intel_lvds.c
31,12 → 31,11
//#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_edid.h"
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
//#include <linux/acpi.h>
 
65,12 → 64,40
struct intel_lvds, base);
}
 
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lvds_reg, tmp;
 
if (HAS_PCH_SPLIT(dev)) {
lvds_reg = PCH_LVDS;
} else {
lvds_reg = LVDS;
}
 
tmp = I915_READ(lvds_reg);
 
if (!(tmp & LVDS_PORT_EN))
return false;
 
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
 
return true;
}
 
/**
* Sets the power state for the panel.
*/
static void intel_lvds_enable(struct intel_lvds *intel_lvds)
static void intel_enable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_device *dev = encoder->base.dev;
struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
 
107,12 → 134,13
if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
DRM_ERROR("timed out waiting for panel to power on\n");
 
intel_panel_enable_backlight(dev);
intel_panel_enable_backlight(dev, intel_crtc->pipe);
}
 
static void intel_lvds_disable(struct intel_lvds *intel_lvds)
static void intel_disable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_device *dev = encoder->base.dev;
struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
 
141,18 → 169,6
POSTING_READ(lvds_reg);
}
 
static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
{
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
if (mode == DRM_MODE_DPMS_ON)
intel_lvds_enable(intel_lvds);
else
intel_lvds_disable(intel_lvds);
 
/* XXX: We never power down the LVDS pairs. */
}
 
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
187,6 → 203,8
 
mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
 
mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
}
 
static void
208,6 → 226,8
 
mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
 
mode->private_flags |= INTEL_MODE_CRTC_TIMINGS_SET;
}
 
static inline u32 panel_fitter_scaling(u32 source, u32 target)
224,14 → 244,13
}
 
static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
struct drm_encoder *tmp_encoder;
struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
 
241,14 → 260,8
return false;
}
 
/* Should never happen!! */
list_for_each_entry(tmp_encoder, &dev->mode_config.encoder_list, head) {
if (tmp_encoder != encoder && tmp_encoder->crtc == encoder->crtc) {
DRM_ERROR("Can't enable LVDS and another "
"encoder on the same pipe\n");
if (intel_encoder_check_is_cloned(&intel_lvds->base))
return false;
}
}
 
/*
* We have timings from the BIOS for the panel, put them in
283,6 → 296,8
for_each_pipe(pipe)
I915_WRITE(BCLRPAT(pipe), 0);
 
drm_mode_set_crtcinfo(adjusted_mode, 0);
 
switch (intel_lvds->fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
398,29 → 413,6
return true;
}
 
static void intel_lvds_prepare(struct drm_encoder *encoder)
{
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
/*
* Prior to Ironlake, we must disable the pipe if we want to adjust
* the panel fitter. However at all other times we can just reset
* the registers regardless.
*/
if (!HAS_PCH_SPLIT(encoder->dev) && intel_lvds->pfit_dirty)
intel_lvds_disable(intel_lvds);
}
 
static void intel_lvds_commit(struct drm_encoder *encoder)
{
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
/* Always do a full power on as we do not know what state
* we were left in.
*/
intel_lvds_enable(intel_lvds);
}
 
static void intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
474,7 → 466,7
 
static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_KMS("Skipping forced modeset for %s\n", id->ident);
DRM_INFO("Skipping forced modeset for %s\n", id->ident);
return 1;
}
 
535,7 → 527,7
dev_priv->modeset_on_lid = 0;
 
mutex_lock(&dev->mode_config.mutex);
drm_helper_resume_force_mode(dev);
intel_modeset_check_state(dev);
mutex_unlock(&dev->mode_config.mutex);
 
return NOTIFY_OK;
588,7 → 580,7
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
drm_crtc_helper_set_mode(crtc, &crtc->mode,
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
}
597,11 → 589,9
}
 
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
.dpms = intel_lvds_dpms,
.mode_fixup = intel_lvds_mode_fixup,
.prepare = intel_lvds_prepare,
.mode_set = intel_lvds_mode_set,
.commit = intel_lvds_commit,
.disable = intel_encoder_noop,
};
 
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
611,7 → 601,7
};
 
static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.dpms = intel_connector_dpms,
.detect = intel_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_lvds_set_property,
624,7 → 614,7
 
static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
{
DRM_DEBUG_KMS("Skipping LVDS initialization for %s\n", id->ident);
DRM_INFO("Skipping LVDS initialization for %s\n", id->ident);
return 1;
}
 
741,6 → 731,62
DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Hewlett-Packard HP t5740e Thin Client",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Hewlett-Packard t5745",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Hewlett-Packard st5747",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "MSI Wind Box DC500",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_BOARD_NAME, "MS-7469"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "ZOTAC ZBOXSD-ID12/ID13",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Gigabyte GA-D525TUD",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Supermicro X7SPA-H",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
},
},
 
{ } /* terminating entry */
};
823,7 → 869,7
child->device_type != DEVICE_TYPE_LFP)
continue;
 
if (child->i2c_pin)
if (intel_gmbus_is_port_valid(child->i2c_pin))
*i2c_pin = child->i2c_pin;
 
/* However, we cannot trust the BIOS writers to populate
846,6 → 892,18
return false;
}
 
static bool intel_lvds_supported(struct drm_device *dev)
{
/* With the introduction of the PCH we gained a dedicated
* LVDS presence pin, use it. */
if (HAS_PCH_SPLIT(dev))
return true;
 
/* Otherwise LVDS was only attached to mobile products,
* except for the inglorious 830gm */
return IS_MOBILE(dev) && !IS_I830(dev);
}
 
/**
* intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
867,6 → 925,9
int pipe;
u8 pin;
 
if (!intel_lvds_supported(dev))
return false;
 
/* Skip init on machines we know falsely report LVDS */
// if (dmi_check_system(intel_no_lvds))
// return false;
910,12 → 971,19
drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
 
intel_encoder->enable = intel_enable_lvds;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
 
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
 
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->cloneable = false;
if (HAS_PCH_SPLIT(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
else if (IS_GEN4(dev))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
else
intel_encoder->crtc_mask = (1 << 1);
 
950,7 → 1018,8
* preferred mode is the right one.
*/
intel_lvds->edid = drm_get_edid(connector,
&dev_priv->gmbus[pin].adapter);
intel_gmbus_get_adapter(dev_priv,
pin));
if (intel_lvds->edid) {
if (drm_add_edid_modes(connector,
intel_lvds->edid)) {
1022,35 → 1091,14
goto failed;
 
out:
if (HAS_PCH_SPLIT(dev)) {
u32 pwm;
 
pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
 
/* make sure PWM is enabled and locked to the LVDS pipe */
pwm = I915_READ(BLC_PWM_CPU_CTL2);
if (pipe == 0 && (pwm & PWM_PIPE_B))
I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
if (pipe)
pwm |= PWM_PIPE_B;
else
pwm &= ~PWM_PIPE_B;
I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
 
pwm = I915_READ(BLC_PWM_PCH_CTL1);
pwm |= PWM_PCH_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
/*
* Unlock registers and just
* leave them unlocked
*/
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_PP_CONTROL,
I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
} else {
/*
* Unlock registers and just
* leave them unlocked
*/
I915_WRITE(PP_CONTROL,
I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
}
/drivers/video/drm/i915/intel_modes.c
27,35 → 27,27
#include <linux/i2c.h>
#include <linux/fb.h>
#include <drm/drm_edid.h>
#include "drmP.h"
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drv.h"
 
/**
* intel_ddc_probe
*
* intel_connector_update_modes - update connector from edid
* @connector: DRM connector device to use
* @edid: previously read EDID information
*/
bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid)
{
struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
u8 out_buf[] = { 0x0, 0x0};
u8 buf[2];
struct i2c_msg msgs[] = {
{
.addr = 0x50,
.flags = 0,
.len = 1,
.buf = out_buf,
},
{
.addr = 0x50,
.flags = I2C_M_RD,
.len = 1,
.buf = buf,
}
};
int ret;
 
return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
kfree(edid);
 
return ret;
}
 
/**
69,24 → 61,19
struct i2c_adapter *adapter)
{
struct edid *edid;
int ret = 0;
 
edid = drm_get_edid(connector, adapter);
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
connector->display_info.raw_edid = NULL;
kfree(edid);
}
if (!edid)
return 0;
 
return ret;
return intel_connector_update_modes(connector, edid);
}
 
static const char *force_audio_names[] = {
"off",
"auto",
"on",
static const struct drm_prop_enum_list force_audio_names[] = {
{ HDMI_AUDIO_OFF_DVI, "force-dvi" },
{ HDMI_AUDIO_OFF, "off" },
{ HDMI_AUDIO_AUTO, "auto" },
{ HDMI_AUDIO_ON, "on" },
};
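 
Note the extra "force-dvi" entry relative to the old string table: it maps to HDMI_AUDIO_OFF_DVI, which the HDMI property handler above also uses to clear has_hdmi_sink, i.e. the sink is then driven as a plain DVI monitor.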
 
void
95,19 → 82,16
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_property *prop;
int i;
#if 0
prop = dev_priv->force_audio_property;
if (prop == NULL) {
prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
prop = drm_property_create_enum(dev, 0,
"audio",
force_audio_names,
ARRAY_SIZE(force_audio_names));
if (prop == NULL)
return;
 
for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
 
dev_priv->force_audio_property = prop;
}
drm_connector_attach_property(connector, prop, 0);
114,9 → 98,9
#endif
}
 
static const char *broadcast_rgb_names[] = {
"Full",
"Limited 16:235",
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
{ 0, "Full" },
{ 1, "Limited 16:235" },
};
 
void
125,19 → 109,16
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_property *prop;
int i;
#if 0
prop = dev_priv->broadcast_rgb_property;
if (prop == NULL) {
prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
"Broadcast RGB",
broadcast_rgb_names,
ARRAY_SIZE(broadcast_rgb_names));
if (prop == NULL)
return;
 
for (i = 0; i < ARRAY_SIZE(broadcast_rgb_names); i++)
drm_property_add_enum(prop, i, i, broadcast_rgb_names[i]);
 
dev_priv->broadcast_rgb_property = prop;
}
 
/drivers/video/drm/i915/intel_opregion.c
25,12 → 25,16
*
*/
 
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
//#include <linux/acpi.h>
//#include <linux/acpi_io.h>
//#include <acpi/video.h>
#include <linux/errno.h>
#include "drmP.h"
#include "i915_drm.h"
 
 
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
 
148,19 → 152,13
#ifdef CONFIG_ACPI
#endif
 
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
u32 *val)
{
*val = PciRead32(dev->busnr, dev->devfn, where);
return 1;
}
 
int intel_opregion_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_opregion *opregion = &dev_priv->opregion;
void *base;
void __iomem *base;
u32 asls, mboxes;
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
 
pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
174,7 → 172,9
if (!base)
return -ENOMEM;
 
if (memcmp(base, OPREGION_SIGNATURE, 16)) {
memcpy(buf, base, sizeof(buf));
 
if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
DRM_DEBUG_DRIVER("opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
184,7 → 184,7
 
opregion->lid_state = base + ACPI_CLID;
 
mboxes = opregion->header->mboxes;
mboxes = ioread32(&opregion->header->mboxes);
if (mboxes & MBOX_ACPI) {
DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
/drivers/video/drm/i915/intel_panel.c
28,23 → 28,12
* Chris Wilson <chris@chris-wilson.co.uk>
*/
 
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
#include <linux/moduleparam.h>
#include "intel_drv.h"
 
static inline int pci_read_config_byte(struct pci_dev *dev, int where,
u8 *val)
{
*val = PciRead8(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_write_config_byte(struct pci_dev *dev, int where,
u8 val)
{
PciWrite8(dev->busnr, dev->devfn, where, val);
return 1;
}
 
 
#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
 
void
62,8 → 51,6
adjusted_mode->vtotal = fixed_mode->vtotal;
 
adjusted_mode->clock = fixed_mode->clock;
 
drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
}
 
/* adjusted_mode has been preset to be the panel's fixed mode */
70,7 → 57,7
void
intel_pch_panel_fitting(struct drm_device *dev,
int fitting_mode,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
156,8 → 143,8
dev_priv->saveBLC_PWM_CTL2 = val;
} else if (val == 0) {
I915_WRITE(BLC_PWM_PCH_CTL2,
dev_priv->saveBLC_PWM_CTL);
val = dev_priv->saveBLC_PWM_CTL;
dev_priv->saveBLC_PWM_CTL2);
val = dev_priv->saveBLC_PWM_CTL2;
}
} else {
val = I915_READ(BLC_PWM_CTL);
176,19 → 163,12
return val;
}
 
u32 intel_panel_get_max_backlight(struct drm_device *dev)
static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 max;
 
max = i915_read_blc_pwm_ctl(dev_priv);
if (max == 0) {
/* XXX add code here to query mode clock or hardware clock
* and program max PWM appropriately.
*/
printk(KERN_WARNING "fixme: max PWM is zero.\n");
return 1;
}
 
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
202,13 → 182,50
max *= 0xff;
}
 
return max;
}
 
u32 intel_panel_get_max_backlight(struct drm_device *dev)
{
u32 max;
 
max = _intel_panel_get_max_backlight(dev);
if (max == 0) {
/* XXX add code here to query mode clock or hardware clock
* and program max PWM appropriately.
*/
printk("fixme: max PWM is zero\n");
return 1;
}
 
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
return max;
}
 
u32 intel_panel_get_backlight(struct drm_device *dev)
static int i915_panel_invert_brightness;
MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness "
"(-1 force normal, 0 machine defaults, 1 force inversion), please "
"report PCI device ID, subsystem vendor and subsystem device ID "
"to dri-devel@lists.freedesktop.org, if your machine needs it. "
"It will then be included in an upcoming module version.");
module_param_named(invert_brightness, i915_panel_invert_brightness, int, 0600);
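 
Because the option is registered through module_param_named(), it can also be forced at boot, e.g. i915.invert_brightness=1 to invert unconditionally or i915.invert_brightness=-1 to suppress the quirk.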
static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (i915_panel_invert_brightness < 0)
return val;
 
if (i915_panel_invert_brightness > 0 ||
dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS)
return intel_panel_get_max_backlight(dev) - val;
 
return val;
}
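 
For example, with a maximum PWM value of 255, a requested level of 48 leaves this helper as 255 - 48 = 207 whenever inversion is in force.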
 
static u32 intel_panel_get_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
 
if (HAS_PCH_SPLIT(dev)) {
226,6 → 243,7
}
}
 
val = intel_panel_compute_brightness(dev, val);
DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
return val;
}
243,6 → 261,7
u32 tmp;
 
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
level = intel_panel_compute_brightness(dev, level);
 
if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
278,9 → 297,24
 
dev_priv->backlight_enabled = false;
intel_panel_actually_set_backlight(dev, 0);
 
if (INTEL_INFO(dev)->gen >= 4) {
uint32_t reg, tmp;
 
reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
 
I915_WRITE(reg, I915_READ(reg) & ~BLM_PWM_ENABLE);
 
if (HAS_PCH_SPLIT(dev)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp &= ~BLM_PCH_PWM_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
}
}
}
 
void intel_panel_enable_backlight(struct drm_device *dev)
void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
287,6 → 321,45
if (dev_priv->backlight_level == 0)
dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
 
if (INTEL_INFO(dev)->gen >= 4) {
uint32_t reg, tmp;
 
reg = HAS_PCH_SPLIT(dev) ? BLC_PWM_CPU_CTL2 : BLC_PWM_CTL2;
 
 
tmp = I915_READ(reg);
 
/* Note that this can also get called through dpms changes. We
* don't track the backlight dpms state, so check whether anything
* needs to be done first. */
if (tmp & BLM_PWM_ENABLE)
goto set_level;
 
if (dev_priv->num_pipe == 3)
tmp &= ~BLM_PIPE_SELECT_IVB;
else
tmp &= ~BLM_PIPE_SELECT;
 
tmp |= BLM_PIPE(pipe);
tmp &= ~BLM_PWM_ENABLE;
 
I915_WRITE(reg, tmp);
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
 
if (HAS_PCH_SPLIT(dev)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;
tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
}
}
 
set_level:
/* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1.
* BLC_PWM_CPU_CTL may be cleared to zero automatically when these
* registers are set.
*/
dev_priv->backlight_enabled = true;
intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
}
359,8 → 432,13
else
return -ENODEV;
 
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = intel_panel_get_max_backlight(dev);
props.max_brightness = _intel_panel_get_max_backlight(dev);
if (props.max_brightness == 0) {
DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
return -ENODEV;
}
dev_priv->backlight =
backlight_device_register("intel_backlight",
&connector->kdev, dev,
/drivers/video/drm/i915/intel_pm.c
0,0 → 1,4243
/*
* Copyright © 2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eugeni Dodonov <eugeni.dodonov@intel.com>
*
*/
 
#define iowrite32(v, addr) writel((v), (addr))
#define ioread32(addr) readl(addr)
 
//#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/math64.h>
//#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
 
#define FORCEWAKE_ACK_TIMEOUT_MS 2
 
#define assert_spin_locked(x)
 
void getrawmonotonic(struct timespec *ts);
void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec);
 
static inline struct timespec timespec_sub(struct timespec lhs,
struct timespec rhs)
{
struct timespec ts_delta;
set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
lhs.tv_nsec - rhs.tv_nsec);
return ts_delta;
}
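 
set_normalized_timespec() absorbs a negative nanosecond delta into the seconds field, so e.g. {2 s, 100 ns} minus {1 s, 200 ns} yields {0 s, 999999900 ns} rather than a negative tv_nsec.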
 
 
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
* framebuffer contents in-memory, aiming at reducing the required bandwidth
* during in-memory transfers and, therefore, reduce power consumption.
*
* The benefits of FBC are mostly visible with solid backgrounds and
* variation-less patterns.
*
* FBC-related functionality can be enabled by means of the
* i915.i915_enable_fbc parameter
*/
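 
Concretely, booting with i915.i915_enable_fbc=1 forces compression on and i915.i915_enable_fbc=0 forces it off; a negative value keeps the per-chip default chosen in intel_update_fbc() below.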
 
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 fbc_ctl;
 
/* Disable compression */
fbc_ctl = I915_READ(FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
return;
 
fbc_ctl &= ~FBC_CTL_EN;
I915_WRITE(FBC_CONTROL, fbc_ctl);
 
/* Wait for compressing bit to clear */
if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
return;
}
 
DRM_DEBUG_KMS("disabled FBC\n");
}
 
static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int cfb_pitch;
int plane, i;
u32 fbc_ctl, fbc_ctl2;
 
cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
 
/* FBC_CTL wants 64B units */
cfb_pitch = (cfb_pitch / 64) - 1;
plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
 
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0);
 
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
fbc_ctl2 |= plane;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
 
/* enable it... */
fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= obj->fence_reg;
I915_WRITE(FBC_CONTROL, fbc_ctl);
 
DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
cfb_pitch, crtc->y, intel_crtc->plane);
}
 
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
 
static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
 
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
 
I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
 
/* enable it... */
I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
 
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
 
static void g4x_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
 
/* Disable compression */
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(DPFC_CONTROL, dpfc_ctl);
 
DRM_DEBUG_KMS("disabled FBC\n");
}
}
 
static bool g4x_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
 
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blt_ecoskpd;
 
/* Make sure blitter notifies FBC of writes */
gen6_gt_force_wake_get(dev_priv);
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
gen6_gt_force_wake_put(dev_priv);
}
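 
The paired writes above follow the usual masked-register convention: the upper 16 bits of GEN6_BLITTER_ECOSKPD act as a write-enable mask for the lower 16 (hence the GEN6_BLITTER_LOCK_SHIFT shifts), so each bit is armed in the mask half before the value half will stick.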
 
static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
u32 dpfc_ctl;
 
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
/* Set persistent mode for front-buffer rendering, ala X. */
dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
 
I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
if (IS_GEN6(dev)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
sandybridge_blit_fbc_update(dev);
}
 
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
 
static void ironlake_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpfc_ctl;
 
/* Disable compression */
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
 
DRM_DEBUG_KMS("disabled FBC\n");
}
}
 
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
 
bool intel_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!dev_priv->display.fbc_enabled)
return false;
 
return dev_priv->display.fbc_enabled(dev);
}
 
#if 0
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct intel_fbc_work *work =
container_of(to_delayed_work(__work),
struct intel_fbc_work, work);
struct drm_device *dev = work->crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
mutex_lock(&dev->struct_mutex);
if (work == dev_priv->fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
if (work->crtc->fb == work->fb) {
dev_priv->display.enable_fbc(work->crtc,
work->interval);
 
dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
dev_priv->cfb_fb = work->crtc->fb->base.id;
dev_priv->cfb_y = work->crtc->y;
}
 
dev_priv->fbc_work = NULL;
}
mutex_unlock(&dev->struct_mutex);
 
kfree(work);
}
 
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
if (dev_priv->fbc_work == NULL)
return;
 
DRM_DEBUG_KMS("cancelling pending FBC enable\n");
 
/* Synchronisation is provided by struct_mutex and checking of
* dev_priv->fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
if (cancel_delayed_work(&dev_priv->fbc_work->work))
/* tasklet was killed before being run, clean up */
kfree(dev_priv->fbc_work);
 
/* Mark the work as no longer wanted so that if it does
* wake up (because the work was already running and waiting
* for our mutex), it will discover that it is no longer
* necessary to run.
*/
dev_priv->fbc_work = NULL;
}
#endif
 
void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
// if (!dev_priv->display.enable_fbc)
return;
#if 0
intel_cancel_fbc_work(dev_priv);
 
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL) {
dev_priv->display.enable_fbc(crtc, interval);
return;
}
 
work->crtc = crtc;
work->fb = crtc->fb;
work->interval = interval;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
 
dev_priv->fbc_work = work;
 
DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
 
/* Delay the actual enabling to let pageflipping cease and the
* display to settle before starting the compression. Note that
* this delay also serves a second purpose: it allows for a
* vblank to pass after disabling the FBC before we attempt
* to modify the control registers.
*
* A more complicated solution would involve tracking vblanks
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
*/
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
#endif
 
}
 
void intel_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
// intel_cancel_fbc_work(dev_priv);
 
// if (!dev_priv->display.disable_fbc)
// return;
 
// dev_priv->display.disable_fbc(dev);
dev_priv->cfb_plane = -1;
}
 
/**
* intel_update_fbc - enable/disable FBC as needed
* @dev: the drm_device
*
* Set up the framebuffer compression hardware at mode set time. We
* enable it if possible:
* - plane A only (on pre-965)
* - no pixel multiply/line duplication
* - no alpha buffer discard
* - no dual wide
* - framebuffer <= 2048 in width, 1536 in height
*
* We can't assume that any compression will take place (worst case),
* so the compressed buffer has to be the same size as the uncompressed
* one. It also must reside (along with the line length buffer) in
* stolen memory.
*
* We need to enable/disable FBC on a global basis.
*/
void intel_update_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL, *tmp_crtc;
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
int enable_fbc;
 
if (!i915_powersave)
return;
 
if (!I915_HAS_FBC(dev))
return;
 
/*
* If FBC is already on, we just have to verify that we can
* keep it that way...
* Need to disable if:
* - more than one pipe is active
* - changing FBC params (stride, fence, mode)
* - new fb is too large to fit in compressed buffer
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
if (tmp_crtc->enabled &&
!to_intel_crtc(tmp_crtc)->primary_disabled &&
tmp_crtc->fb) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
goto out_disable;
}
crtc = tmp_crtc;
}
}
 
if (!crtc || crtc->fb == NULL) {
DRM_DEBUG_KMS("no output, disabling\n");
dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
goto out_disable;
}
 
intel_crtc = to_intel_crtc(crtc);
fb = crtc->fb;
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
 
enable_fbc = i915_enable_fbc;
if (enable_fbc < 0) {
DRM_DEBUG_KMS("fbc set to per-chip default\n");
enable_fbc = 1;
if (INTEL_INFO(dev)->gen <= 6)
enable_fbc = 0;
}
if (!enable_fbc) {
DRM_DEBUG_KMS("fbc disabled per module param\n");
dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
if (intel_fb->obj->base.size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
goto out_disable;
}
if ((crtc->mode.hdisplay > 2048) ||
(crtc->mode.vdisplay > 1536)) {
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
 
/* The use of a CPU fence is mandatory in order to detect writes
* by the CPU to the scanout and trigger updates to the FBC.
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
}
 
/* If the kernel debugger is active, always disable compression */
if (in_dbg_master())
goto out_disable;
 
/* If the scanout has not changed, don't modify the FBC settings.
* Note that we make the fundamental assumption that the fb->obj
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
if (dev_priv->cfb_plane == intel_crtc->plane &&
dev_priv->cfb_fb == fb->base.id &&
dev_priv->cfb_y == crtc->y)
return;
 
if (intel_fbc_enabled(dev)) {
/* We update FBC along two paths, after changing fb/crtc
* configuration (modeswitching) and after page-flipping
* finishes. For the latter, we know that not only did
* we disable the FBC at the start of the page-flip
* sequence, but also more than one vblank has passed.
*
* For the former case of modeswitching, it is possible
* to switch between two FBC valid configurations
* instantaneously so we do need to disable the FBC
* before we can modify its control registers. We also
* have to wait for the next vblank for that to take
* effect. However, since we delay enabling FBC we can
* assume that a vblank has passed since disabling and
* that we can safely alter the registers in the deferred
* callback.
*
* In the scenario that we go from a valid to invalid
* and then back to valid FBC configuration we have
* no strict enforcement that a vblank occurred since
* disabling the FBC. However, along all current pipe
* disabling paths we do need to wait for a vblank at
* some point. And we wait before enabling FBC anyway.
*/
DRM_DEBUG_KMS("disabling active FBC for update\n");
intel_disable_fbc(dev);
}
 
intel_enable_fbc(crtc, 500);
return;
 
out_disable:
/* Multiple disables should be harmless */
if (intel_fbc_enabled(dev)) {
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
intel_disable_fbc(dev);
}
}
 
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 tmp;
 
tmp = I915_READ(CLKCFG);
 
switch (tmp & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_533:
dev_priv->fsb_freq = 533; /* 133*4 */
break;
case CLKCFG_FSB_800:
dev_priv->fsb_freq = 800; /* 200*4 */
break;
case CLKCFG_FSB_667:
dev_priv->fsb_freq = 667; /* 167*4 */
break;
case CLKCFG_FSB_400:
dev_priv->fsb_freq = 400; /* 100*4 */
break;
}
 
switch (tmp & CLKCFG_MEM_MASK) {
case CLKCFG_MEM_533:
dev_priv->mem_freq = 533;
break;
case CLKCFG_MEM_667:
dev_priv->mem_freq = 667;
break;
case CLKCFG_MEM_800:
dev_priv->mem_freq = 800;
break;
}
 
/* detect pineview DDR3 setting */
tmp = I915_READ(CSHRDDR3CTL);
dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
 
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u16 ddrpll, csipll;
 
ddrpll = I915_READ16(DDRMPLL1);
csipll = I915_READ16(CSIPLL0);
 
switch (ddrpll & 0xff) {
case 0xc:
dev_priv->mem_freq = 800;
break;
case 0x10:
dev_priv->mem_freq = 1066;
break;
case 0x14:
dev_priv->mem_freq = 1333;
break;
case 0x18:
dev_priv->mem_freq = 1600;
break;
default:
DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
ddrpll & 0xff);
dev_priv->mem_freq = 0;
break;
}
 
dev_priv->ips.r_t = dev_priv->mem_freq;
 
switch (csipll & 0x3ff) {
case 0x00c:
dev_priv->fsb_freq = 3200;
break;
case 0x00e:
dev_priv->fsb_freq = 3733;
break;
case 0x010:
dev_priv->fsb_freq = 4266;
break;
case 0x012:
dev_priv->fsb_freq = 4800;
break;
case 0x014:
dev_priv->fsb_freq = 5333;
break;
case 0x016:
dev_priv->fsb_freq = 5866;
break;
case 0x018:
dev_priv->fsb_freq = 6400;
break;
default:
DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
csipll & 0x3ff);
dev_priv->fsb_freq = 0;
break;
}
 
if (dev_priv->fsb_freq == 3200) {
dev_priv->ips.c_m = 0;
} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
dev_priv->ips.c_m = 1;
} else {
dev_priv->ips.c_m = 2;
}
}
 
static const struct cxsr_latency cxsr_latency_table[] = {
{1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
{1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
{1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
{1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
{1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
 
{1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
{1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
{1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
{1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
{1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
 
{1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
{1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
{1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
{1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
{1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
 
{0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
{0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
{0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
{0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
{0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
 
{0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
{0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
{0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
{0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
{0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
 
{0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
{0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
{0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
{0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
{0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
};
 
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
int is_ddr3,
int fsb,
int mem)
{
const struct cxsr_latency *latency;
int i;
 
if (fsb == 0 || mem == 0)
return NULL;
 
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
latency = &cxsr_latency_table[i];
if (is_desktop == latency->is_desktop &&
is_ddr3 == latency->is_ddr3 &&
fsb == latency->fsb_freq && mem == latency->mem_freq)
return latency;
}
 
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
 
return NULL;
}
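
/* Illustrative note (not part of the driver): a desktop Pineview part
 * (is_desktop = 1) with DDR3, fsb = 800 and mem = 667 matches the
 * {1, 1, 800, 667, 6420, 36420, 6873, 36873} row above. Assuming the
 * usual struct cxsr_latency field order, that row supplies
 * display_sr = 6420 ns and cursor_sr = 6873 ns to the self-refresh
 * watermark computation in pineview_update_wm() below.
 */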
 
static void pineview_disable_cxsr(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* deactivate cxsr */
I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
 
/*
* Latency for FIFO fetches is dependent on several factors:
* - memory configuration (speed, channels)
* - chipset
* - current MCH state
* It can be fairly high in some situations, so here we assume a fairly
* pessimal value. It's a tradeoff between extra memory fetches (if we
* set this value too high, the FIFO will fetch frequently to stay full)
* and power consumption (set it too low to save power and we might see
* FIFO underruns and display "flicker").
*
* A value of 5us seems to be a good balance; safe for very low end
* platforms but not overly aggressive on lower latency configs.
*/
static const int latency_ns = 5000;
 
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
 
size = dsparb & 0x7f;
if (plane)
size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
 
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A", size);
 
return size;
}
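
/* Illustrative sketch (not part of the driver): how DSPARB partitions the
 * shared FIFO between planes A and B on i9xx. Bits 6:0 hold plane A's
 * share; plane B's share is the distance from there to the C-start field.
 * The register value below is hypothetical.
 */
#if 0
static void example_dsparb_split(void)
{
uint32_t dsparb = (64 << DSPARB_CSTART_SHIFT) | 32; /* hypothetical */
int size_a = dsparb & 0x7f; /* 32 entries */
int size_b = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size_a; /* 32 */

DRM_DEBUG_KMS("FIFO split - A: %d, B: %d\n", size_a, size_b);
}
#endif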
 
static int i85x_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
 
size = dsparb & 0x1ff;
if (plane)
size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
 
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A", size);
 
return size;
}
 
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
 
size = dsparb & 0x7f;
size >>= 2; /* Convert to cachelines */
 
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A",
size);
 
return size;
}
 
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dsparb = I915_READ(DSPARB);
int size;
 
size = dsparb & 0x7f;
size >>= 1; /* Convert to cachelines */
 
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A", size);
 
return size;
}
 
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
PINEVIEW_DISPLAY_FIFO,
PINEVIEW_MAX_WM,
PINEVIEW_DFT_HPLLOFF_WM,
PINEVIEW_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
PINEVIEW_CURSOR_FIFO,
PINEVIEW_CURSOR_MAX_WM,
PINEVIEW_CURSOR_DFT_WM,
PINEVIEW_CURSOR_GUARD_WM,
PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
G4X_FIFO_SIZE,
G4X_MAX_WM,
G4X_MAX_WM,
2,
G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
VALLEYVIEW_FIFO_SIZE,
VALLEYVIEW_MAX_WM,
VALLEYVIEW_MAX_WM,
2,
G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
I965_CURSOR_FIFO,
VALLEYVIEW_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
I965_CURSOR_FIFO,
I965_CURSOR_MAX_WM,
I965_CURSOR_DFT_WM,
2,
I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
I945_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
I915_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i855_wm_info = {
I855GM_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
I830_FIFO_SIZE,
I915_MAX_WM,
1,
2,
I830_FIFO_LINE_SIZE
};
 
static const struct intel_watermark_params ironlake_display_wm_info = {
ILK_DISPLAY_FIFO,
ILK_DISPLAY_MAXWM,
ILK_DISPLAY_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_wm_info = {
ILK_CURSOR_FIFO,
ILK_CURSOR_MAXWM,
ILK_CURSOR_DFTWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_display_srwm_info = {
ILK_DISPLAY_SR_FIFO,
ILK_DISPLAY_MAX_SRWM,
ILK_DISPLAY_DFT_SRWM,
2,
ILK_FIFO_LINE_SIZE
};
static const struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_CURSOR_SR_FIFO,
ILK_CURSOR_MAX_SRWM,
ILK_CURSOR_DFT_SRWM,
2,
ILK_FIFO_LINE_SIZE
};
 
static const struct intel_watermark_params sandybridge_display_wm_info = {
SNB_DISPLAY_FIFO,
SNB_DISPLAY_MAXWM,
SNB_DISPLAY_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_wm_info = {
SNB_CURSOR_FIFO,
SNB_CURSOR_MAXWM,
SNB_CURSOR_DFTWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_display_srwm_info = {
SNB_DISPLAY_SR_FIFO,
SNB_DISPLAY_MAX_SRWM,
SNB_DISPLAY_DFT_SRWM,
2,
SNB_FIFO_LINE_SIZE
};
static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
SNB_CURSOR_SR_FIFO,
SNB_CURSOR_MAX_SRWM,
SNB_CURSOR_DFT_SRWM,
2,
SNB_FIFO_LINE_SIZE
};
 
 
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
* @wm: chip FIFO params
* @fifo_size: size of the display FIFO, in FIFO entries (cachelines)
* @pixel_size: display pixel size
* @latency_ns: memory latency for the platform
*
* Calculate the watermark level (the level at which the display plane will
* start fetching from memory again). Each chip has a different display
* FIFO size and allocation, so the caller needs to figure that out and pass
* in the correct intel_watermark_params structure.
*
* As the pixel clock runs, the FIFO will be drained at a rate that depends
* on the pixel size. When it reaches the watermark level, it'll start
* fetching FIFO-line-sized chunks from memory until the FIFO fills
* past the watermark point. If the FIFO drains completely, a FIFO underrun
* will occur, and a display engine hang could result.
*/
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
const struct intel_watermark_params *wm,
int fifo_size,
int pixel_size,
unsigned long latency_ns)
{
long entries_required, wm_size;
 
/*
* Note: we need to make sure we don't overflow for various clock &
* latency values.
* Clocks go from a few thousand to several hundred thousand kHz;
* latency is usually a few thousand ns.
*/
entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
1000;
entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
 
DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
 
wm_size = fifo_size - (entries_required + wm->guard_size);
 
DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
 
/* Don't promote wm_size to unsigned... */
if (wm_size > (long)wm->max_wm)
wm_size = wm->max_wm;
if (wm_size <= 0)
wm_size = wm->default_wm;
return wm_size;
}
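
/* Illustrative note (not part of the driver): the arithmetic above with
 * hypothetical numbers. A 100000 kHz dot clock at 4 bytes/pixel and
 * 5000 ns of latency drains (100000 / 1000) * 4 * 5000 / 1000 = 2000
 * bytes while a fetch is outstanding; with 64-byte cachelines that is
 * DIV_ROUND_UP(2000, 64) = 32 entries, so a FIFO of 96 entries with a
 * guard of 2 yields a watermark of 96 - (32 + 2) = 62.
 */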
 
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
struct drm_crtc *crtc, *enabled = NULL;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->enabled && crtc->fb) {
if (enabled)
return NULL;
enabled = crtc;
}
}
 
return enabled;
}
 
static void pineview_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
unsigned long wm;
 
latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
dev_priv->fsb_freq, dev_priv->mem_freq);
if (!latency) {
DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
pineview_disable_cxsr(dev);
return;
}
 
crtc = single_enabled_crtc(dev);
if (crtc) {
int clock = crtc->mode.clock;
int pixel_size = crtc->fb->bits_per_pixel / 8;
 
/* Display SR */
wm = intel_calculate_wm(clock, &pineview_display_wm,
pineview_display_wm.fifo_size,
pixel_size, latency->display_sr);
reg = I915_READ(DSPFW1);
reg &= ~DSPFW_SR_MASK;
reg |= wm << DSPFW_SR_SHIFT;
I915_WRITE(DSPFW1, reg);
DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
 
/* cursor SR */
wm = intel_calculate_wm(clock, &pineview_cursor_wm,
pineview_display_wm.fifo_size,
pixel_size, latency->cursor_sr);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_CURSOR_SR_MASK;
reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
I915_WRITE(DSPFW3, reg);
 
/* Display HPLL off SR */
wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->display_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_SR_MASK;
reg |= wm & DSPFW_HPLL_SR_MASK;
I915_WRITE(DSPFW3, reg);
 
/* cursor HPLL off SR */
wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
pineview_display_hplloff_wm.fifo_size,
pixel_size, latency->cursor_hpll_disable);
reg = I915_READ(DSPFW3);
reg &= ~DSPFW_HPLL_CURSOR_MASK;
reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
I915_WRITE(DSPFW3, reg);
DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
 
/* activate cxsr */
I915_WRITE(DSPFW3,
I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
DRM_DEBUG_KMS("Self-refresh is enabled\n");
} else {
pineview_disable_cxsr(dev);
DRM_DEBUG_KMS("Self-refresh is disabled\n");
}
}
 
static bool g4x_compute_wm0(struct drm_device *dev,
int plane,
const struct intel_watermark_params *display,
int display_latency_ns,
const struct intel_watermark_params *cursor,
int cursor_latency_ns,
int *plane_wm,
int *cursor_wm)
{
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;

int htotal, hdisplay, clock, pixel_size;
int line_time_us, line_count;
int entries, tlb_miss;
 
// ENTER();
 
// dbgprintf("plane %d display %x cursor %x \n", plane, display, cursor);
// dbgprintf("plane_wm %x cursor_wm %x \n", plane_wm, cursor_wm);
 
crtc = intel_get_crtc_for_plane(dev, plane);
intel_crtc = to_intel_crtc(crtc);
 
// dbgprintf("CRTC %d\n, fb %x, enabled %d\n",
// crtc->base.id, crtc->fb, crtc->enabled );
 
 
 
if (crtc->fb == NULL || !crtc->enabled || !intel_crtc->active) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
}
 
htotal = crtc->mode.htotal;
hdisplay = crtc->mode.hdisplay;
clock = crtc->mode.clock;
pixel_size = crtc->fb->bits_per_pixel / 8;
 
// dbgprintf("mark 1\n");
 
/* Use the small buffer method to calculate plane watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
if (tlb_miss > 0)
entries += tlb_miss;
entries = DIV_ROUND_UP(entries, display->cacheline_size);
*plane_wm = entries + display->guard_size;
if (*plane_wm > (int)display->max_wm)
*plane_wm = display->max_wm;
 
// dbgprintf("clock %d line_time_us %d\n",clock, line_time_us );
 
/* Use the large buffer method to calculate cursor watermark */
line_time_us = ((htotal * 1000) / clock);
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
 
entries = line_count * 64 * pixel_size;
 
// dbgprintf("mark 3\n");
 
// dbgprintf("fifo size %d line size %d\n",
// cursor->fifo_size, cursor->cacheline_size);
 
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
 
if (tlb_miss > 0)
entries += tlb_miss;
 
// dbgprintf("mark 4\n");
 
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
 
// dbgprintf("entries %d \n",entries);
 
*cursor_wm = entries + cursor->guard_size;
 
if (*cursor_wm > (int)cursor->max_wm)
*cursor_wm = (int)cursor->max_wm;
 
// LEAVE();
 
return true;
}
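
/* Illustrative note (not part of the driver): the tlb_miss term above only
 * matters for narrow modes. With a hypothetical fifo_size of 127 entries,
 * a 64-byte cacheline and hdisplay = 800, the FIFO covers 127 * 64 = 8128
 * bytes while hdisplay * 8 = 6400, so tlb_miss = 1728 extra bytes are
 * folded into the fetch estimate before rounding up to cachelines.
 */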
 
/*
* Check the wm result.
*
* If any calculated watermark value is larger than the maximum value that
* can be programmed into the associated watermark register, that watermark
* must be disabled.
*/
static bool g4x_check_srwm(struct drm_device *dev,
int display_wm, int cursor_wm,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor)
{
DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
display_wm, cursor_wm);
 
if (display_wm > display->max_wm) {
DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
display_wm, display->max_wm);
return false;
}
 
if (cursor_wm > cursor->max_wm) {
DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
cursor_wm, cursor->max_wm);
return false;
}
 
if (!(display_wm || cursor_wm)) {
DRM_DEBUG_KMS("SR latency is 0, disabling\n");
return false;
}
 
return true;
}
 
static bool g4x_compute_srwm(struct drm_device *dev,
int plane,
int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *display_wm, int *cursor_wm)
{
struct drm_crtc *crtc;
int hdisplay, htotal, pixel_size, clock;
unsigned long line_time_us;
int line_count, line_size;
int small, large;
int entries;
 
if (!latency_ns) {
*display_wm = *cursor_wm = 0;
return false;
}
 
crtc = intel_get_crtc_for_plane(dev, plane);
hdisplay = crtc->mode.hdisplay;
htotal = crtc->mode.htotal;
clock = crtc->mode.clock;
pixel_size = crtc->fb->bits_per_pixel / 8;
 
line_time_us = (htotal * 1000) / clock;
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
 
/* Use the minimum of the small and large buffer method for primary */
small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
large = line_count * line_size;
 
entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
*display_wm = entries + display->guard_size;
 
/* calculate the self-refresh watermark for display cursor */
entries = line_count * pixel_size * 64;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
 
return g4x_check_srwm(dev,
*display_wm, *cursor_wm,
display, cursor);
}
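
/* Illustrative note (not part of the driver): the line_count expression
 * above implements the documented "trunc(latency / line time) + 1" while
 * staying in integer ns/us arithmetic. E.g. htotal = 2200 at a
 * 148500 kHz clock gives line_time_us = 2200 * 1000 / 148500 = 14; with
 * latency_ns = 12000, line_count = (12000 / 14 + 1000) / 1000 = 1.
 */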
 
static bool vlv_compute_drain_latency(struct drm_device *dev,
int plane,
int *plane_prec_mult,
int *plane_dl,
int *cursor_prec_mult,
int *cursor_dl)
{
struct drm_crtc *crtc;
int clock, pixel_size;
int entries;
 
crtc = intel_get_crtc_for_plane(dev, plane);
if (crtc->fb == NULL || !crtc->enabled)
return false;
 
clock = crtc->mode.clock; /* VESA DOT Clock */
pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
 
entries = (clock / 1000) * pixel_size;
*plane_prec_mult = (entries > 256) ?
DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
pixel_size);
 
entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
*cursor_prec_mult = (entries > 256) ?
DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
 
return true;
}
 
/*
* Update drain latency registers of memory arbiter
*
* Valleyview SoC has a new memory arbiter and needs drain latency registers
* to be programmed. Each plane has a drain latency multiplier and a drain
* latency value.
*/
 
static void vlv_update_drain_latency(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_prec, planea_dl, planeb_prec, planeb_dl;
int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
either 16 or 32 */
 
/* For plane A, Cursor A */
if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
&cursor_prec_mult, &cursora_dl)) {
cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
 
I915_WRITE(VLV_DDL1, cursora_prec |
(cursora_dl << DDL_CURSORA_SHIFT) |
planea_prec | planea_dl);
}
 
/* For plane B, Cursor B */
if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
&cursor_prec_mult, &cursorb_dl)) {
cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
 
I915_WRITE(VLV_DDL2, cursorb_prec |
(cursorb_dl << DDL_CURSORB_SHIFT) |
planeb_prec | planeb_dl);
}
}
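
/* Illustrative note (not part of the driver): with a hypothetical
 * 148500 kHz dot clock at 4 bytes/pixel, entries = 148 * 4 = 592 > 256,
 * so the 32x precision multiplier is selected and the plane drain
 * latency works out to (64 * 32 * 4) / 592 = 13.
 */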
 
#define single_plane_enabled(mask) is_power_of_2(mask)
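
/* A power-of-two mask has exactly one bit set, i.e. exactly one plane
 * enabled; ffs(mask) - 1 then recovers that plane's index. For example,
 * enabled == 2 (plane B only) passes the check and ffs(2) - 1 == 1
 * selects plane B, while enabled == 3 (both planes) fails it and the
 * self-refresh paths below are skipped.
 */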
 
static void valleyview_update_wm(struct drm_device *dev)
{
static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
 
vlv_update_drain_latency(dev);
 
if (g4x_compute_wm0(dev, 0,
&valleyview_wm_info, latency_ns,
&valleyview_cursor_wm_info, latency_ns,
&planea_wm, &cursora_wm))
enabled |= 1;
 
if (g4x_compute_wm0(dev, 1,
&valleyview_wm_info, latency_ns,
&valleyview_cursor_wm_info, latency_ns,
&planeb_wm, &cursorb_wm))
enabled |= 2;
 
plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&plane_sr, &cursor_sr))
I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
else
I915_WRITE(FW_BLC_SELF_VLV,
I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
planeb_wm, cursorb_wm,
plane_sr, cursor_sr);
 
I915_WRITE(DSPFW1,
(plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
}
 
static void g4x_update_wm(struct drm_device *dev)
{
static const int sr_latency_ns = 12000;
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
unsigned int enabled = 0;
 
if (g4x_compute_wm0(dev, 0,
&g4x_wm_info, latency_ns,
&g4x_cursor_wm_info, latency_ns,
&planea_wm, &cursora_wm))
enabled |= 1;
 
if (g4x_compute_wm0(dev, 1,
&g4x_wm_info, latency_ns,
&g4x_cursor_wm_info, latency_ns,
&planeb_wm, &cursorb_wm))
enabled |= 2;
 
plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&g4x_wm_info,
&g4x_cursor_wm_info,
&plane_sr, &cursor_sr))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
else
I915_WRITE(FW_BLC_SELF,
I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
planeb_wm, cursorb_wm,
plane_sr, cursor_sr);
 
I915_WRITE(DSPFW1,
(plane_sr << DSPFW_SR_SHIFT) |
(cursorb_wm << DSPFW_CURSORB_SHIFT) |
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
 
static void i965_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
 
/* Calc sr entries for one plane configs */
crtc = single_enabled_crtc(dev);
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
int clock = crtc->mode.clock;
int htotal = crtc->mode.htotal;
int hdisplay = crtc->mode.hdisplay;
int pixel_size = crtc->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
 
line_time_us = ((htotal * 1000) / clock);
 
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * hdisplay;
entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
srwm = I965_FIFO_SIZE - entries;
if (srwm < 0)
srwm = 1;
srwm &= 0x1ff;
DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
entries, srwm);
 
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * 64;
entries = DIV_ROUND_UP(entries,
i965_cursor_wm_info.cacheline_size);
cursor_sr = i965_cursor_wm_info.fifo_size -
(entries + i965_cursor_wm_info.guard_size);
 
if (cursor_sr > i965_cursor_wm_info.max_wm)
cursor_sr = i965_cursor_wm_info.max_wm;
 
DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
"cursor %d\n", srwm, cursor_sr);
 
if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
/* Turn off self refresh if both pipes are enabled */
if (IS_CRESTLINE(dev))
I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
& ~FW_BLC_SELF_EN);
}
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
srwm);
 
/* 965 has limitations... */
I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
(8 << 16) | (8 << 8) | (8 << 0));
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
/* update cursor SR watermark */
I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
 
static void i9xx_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
const struct intel_watermark_params *wm_info;
uint32_t fwater_lo;
uint32_t fwater_hi;
int cwm, srwm = 1;
int fifo_size;
int planea_wm, planeb_wm;
struct drm_crtc *crtc, *enabled = NULL;
 
if (IS_I945GM(dev))
wm_info = &i945_wm_info;
else if (!IS_GEN2(dev))
wm_info = &i915_wm_info;
else
wm_info = &i855_wm_info;
 
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
if (crtc->enabled && crtc->fb) {
planea_wm = intel_calculate_wm(crtc->mode.clock,
wm_info, fifo_size,
crtc->fb->bits_per_pixel / 8,
latency_ns);
enabled = crtc;
} else
planea_wm = fifo_size - wm_info->guard_size;
 
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
if (crtc->enabled && crtc->fb) {
planeb_wm = intel_calculate_wm(crtc->mode.clock,
wm_info, fifo_size,
crtc->fb->bits_per_pixel / 8,
latency_ns);
if (enabled == NULL)
enabled = crtc;
else
enabled = NULL;
} else
planeb_wm = fifo_size - wm_info->guard_size;
 
DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
 
/*
* Overlay gets an aggressive default since video jitter is bad.
*/
cwm = 2;
 
/* Play safe and disable self-refresh before adjusting watermarks. */
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
else if (IS_I915GM(dev))
I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
 
/* Calc sr entries for one plane configs */
if (HAS_FW_BLC(dev) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
int clock = enabled->mode.clock;
int htotal = enabled->mode.htotal;
int hdisplay = enabled->mode.hdisplay;
int pixel_size = enabled->fb->bits_per_pixel / 8;
unsigned long line_time_us;
int entries;
 
line_time_us = (htotal * 1000) / clock;
 
/* Use ns/us then divide to preserve precision */
entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
pixel_size * hdisplay;
entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
srwm = wm_info->fifo_size - entries;
if (srwm < 0)
srwm = 1;
 
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
else if (IS_I915GM(dev))
I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
}
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
planea_wm, planeb_wm, cwm, srwm);
 
fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
fwater_hi = (cwm & 0x1f);
 
/* Set request length to 8 cachelines per fetch */
fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
fwater_hi = fwater_hi | (1 << 8);
 
I915_WRITE(FW_BLC, fwater_lo);
I915_WRITE(FW_BLC2, fwater_hi);
 
if (HAS_FW_BLC(dev)) {
if (enabled) {
if (IS_I945G(dev) || IS_I945GM(dev))
I915_WRITE(FW_BLC_SELF,
FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
else if (IS_I915GM(dev))
I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
DRM_DEBUG_KMS("memory self refresh enabled\n");
} else
DRM_DEBUG_KMS("memory self refresh disabled\n");
}
}
 
static void i830_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
uint32_t fwater_lo;
int planea_wm;
 
crtc = single_enabled_crtc(dev);
if (crtc == NULL)
return;
 
planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
crtc->fb->bits_per_pixel / 8,
latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
 
I915_WRITE(FW_BLC, fwater_lo);
}
 
#define ILK_LP0_PLANE_LATENCY 700
#define ILK_LP0_CURSOR_LATENCY 1300
 
/*
* Check the wm result.
*
* If any calculated watermark value is larger than the maximum value that
* can be programmed into the associated watermark register, that watermark
* must be disabled.
*/
static bool ironlake_check_srwm(struct drm_device *dev, int level,
int fbc_wm, int display_wm, int cursor_wm,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
" cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
 
if (fbc_wm > SNB_FBC_MAX_SRWM) {
DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
fbc_wm, SNB_FBC_MAX_SRWM, level);
 
/* FBC has its own way to disable the FBC WM */
I915_WRITE(DISP_ARB_CTL,
I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
return false;
}
 
if (display_wm > display->max_wm) {
DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
display_wm, SNB_DISPLAY_MAX_SRWM, level);
return false;
}
 
if (cursor_wm > cursor->max_wm) {
DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
cursor_wm, SNB_CURSOR_MAX_SRWM, level);
return false;
}
 
if (!(fbc_wm || display_wm || cursor_wm)) {
DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
return false;
}
 
return true;
}
 
/*
* Compute watermark values for WM[1-3].
*/
static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
int latency_ns,
const struct intel_watermark_params *display,
const struct intel_watermark_params *cursor,
int *fbc_wm, int *display_wm, int *cursor_wm)
{
struct drm_crtc *crtc;
unsigned long line_time_us;
int hdisplay, htotal, pixel_size, clock;
int line_count, line_size;
int small, large;
int entries;
 
if (!latency_ns) {
*fbc_wm = *display_wm = *cursor_wm = 0;
return false;
}
 
crtc = intel_get_crtc_for_plane(dev, plane);
hdisplay = crtc->mode.hdisplay;
htotal = crtc->mode.htotal;
clock = crtc->mode.clock;
pixel_size = crtc->fb->bits_per_pixel / 8;
 
line_time_us = (htotal * 1000) / clock;
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = hdisplay * pixel_size;
 
/* Use the minimum of the small and large buffer method for primary */
small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
large = line_count * line_size;
 
entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
*display_wm = entries + display->guard_size;
 
/*
* Spec says:
* FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
*/
*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
 
/* calculate the self-refresh watermark for display cursor */
entries = line_count * pixel_size * 64;
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
*cursor_wm = entries + cursor->guard_size;
 
return ironlake_check_srwm(dev, level,
*fbc_wm, *display_wm, *cursor_wm,
display, cursor);
}
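
/* Illustrative note (not part of the driver): plugging hypothetical
 * numbers into the FBC WM spec formula above. For display_wm = 128 on a
 * 1920-pixel-wide scanout at 4 bytes/pixel, line_size = 7680 and
 * fbc_wm = DIV_ROUND_UP(128 * 64, 7680) + 2 = 2 + 2 = 4 FBC lines.
 */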
 
static void ironlake_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
 
enabled = 0;
if (g4x_compute_wm0(dev, 0,
&ironlake_display_wm_info,
ILK_LP0_PLANE_LATENCY,
&ironlake_cursor_wm_info,
ILK_LP0_CURSOR_LATENCY,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1;
}
 
if (g4x_compute_wm0(dev, 1,
&ironlake_display_wm_info,
ILK_LP0_PLANE_LATENCY,
&ironlake_cursor_wm_info,
ILK_LP0_CURSOR_LATENCY,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 2;
}
 
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*/
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
if (!single_plane_enabled(enabled))
return;
enabled = ffs(enabled) - 1;
 
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
ILK_READ_WM1_LATENCY() * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
(ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
ILK_READ_WM2_LATENCY() * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
(ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/*
* WM3 is unsupported on ILK, probably because we don't have latency
* data for that power state
*/
}
 
static void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
u32 val;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
 
enabled = 0;
if (g4x_compute_wm0(dev, 0,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
val = I915_READ(WM0_PIPEA_ILK);
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
I915_WRITE(WM0_PIPEA_ILK, val |
((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1;
}
 
if (g4x_compute_wm0(dev, 1,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
val = I915_READ(WM0_PIPEB_ILK);
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
I915_WRITE(WM0_PIPEB_ILK, val |
((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 2;
}
 
if ((dev_priv->num_pipe == 3) &&
g4x_compute_wm0(dev, 2,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
val = I915_READ(WM0_PIPEC_IVB);
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
I915_WRITE(WM0_PIPEC_IVB, val |
((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1 << 2; /* pipe C is bit 2, not bits 0-1 */
}
 
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*
* SNB supports 3 levels of watermarks.
*
* WM1/WM2/WM3 watermarks have to be enabled in ascending order
* and disabled in descending order.
*/
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
if (!single_plane_enabled(enabled) ||
dev_priv->sprite_scaling_enabled)
return;
enabled = ffs(enabled) - 1;
 
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
SNB_READ_WM1_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
(SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
SNB_READ_WM2_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
(SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM3 */
if (!ironlake_compute_srwm(dev, 3, enabled,
SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM3_LP_ILK,
WM3_LP_EN |
(SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
}
 
static void
haswell_update_linetime_wm(struct drm_device *dev, int pipe,
struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp;
 
temp = I915_READ(PIPE_WM_LINETIME(pipe));
temp &= ~PIPE_WM_LINETIME_MASK;
 
/* The watermark is computed based on how long it takes to fill a
 * single row at the given clock rate, multiplied by 8.
 */
temp |= PIPE_WM_LINETIME_TIME(
((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
 
/* IPS watermarks are only used by pipe A, and are ignored by
* pipes B and C. They are calculated similarly to the common
* linetime values, except that we are using CD clock frequency
* in MHz instead of pixel rate for the division.
*
* This is a placeholder for the IPS watermark calculation code.
*/
 
I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
}
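
/* Illustrative note (not part of the driver): for a hypothetical 1920-wide
 * mode at a 148500 kHz dot clock, the linetime field above becomes
 * (1920 * 1000 / 148500) * 8 = 12 * 8 = 96, i.e. the row fill time
 * expressed in eighths of a microsecond.
 */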
 
static bool
sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
uint32_t sprite_width, int pixel_size,
const struct intel_watermark_params *display,
int display_latency_ns, int *sprite_wm)
{
struct drm_crtc *crtc;
int clock;
int entries, tlb_miss;
 
crtc = intel_get_crtc_for_plane(dev, plane);
if (crtc->fb == NULL || !crtc->enabled) {
*sprite_wm = display->guard_size;
return false;
}
 
clock = crtc->mode.clock;
 
/* Use the small buffer method to calculate the sprite watermark */
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
tlb_miss = display->fifo_size*display->cacheline_size -
sprite_width * 8;
if (tlb_miss > 0)
entries += tlb_miss;
entries = DIV_ROUND_UP(entries, display->cacheline_size);
*sprite_wm = entries + display->guard_size;
if (*sprite_wm > (int)display->max_wm)
*sprite_wm = display->max_wm;
 
return true;
}
 
static bool
sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
uint32_t sprite_width, int pixel_size,
const struct intel_watermark_params *display,
int latency_ns, int *sprite_wm)
{
struct drm_crtc *crtc;
unsigned long line_time_us;
int clock;
int line_count, line_size;
int small, large;
int entries;
 
if (!latency_ns) {
*sprite_wm = 0;
return false;
}
 
crtc = intel_get_crtc_for_plane(dev, plane);
clock = crtc->mode.clock;
if (!clock) {
*sprite_wm = 0;
return false;
}
 
line_time_us = (sprite_width * 1000) / clock;
if (!line_time_us) {
*sprite_wm = 0;
return false;
}
 
line_count = (latency_ns / line_time_us + 1000) / 1000;
line_size = sprite_width * pixel_size;
 
/* Use the minimum of the small and large buffer method for primary */
small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
large = line_count * line_size;
 
entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
*sprite_wm = entries + display->guard_size;
 
return *sprite_wm <= 0x3ff;
}
 
static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
u32 val;
int sprite_wm, reg;
int ret;
 
switch (pipe) {
case 0:
reg = WM0_PIPEA_ILK;
break;
case 1:
reg = WM0_PIPEB_ILK;
break;
case 2:
reg = WM0_PIPEC_IVB;
break;
default:
return; /* bad pipe */
}
 
ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
&sandybridge_display_wm_info,
latency, &sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
pipe);
return;
}
 
val = I915_READ(reg);
val &= ~WM0_PIPE_SPRITE_MASK;
I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
 
 
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
SNB_READ_WM1_LATENCY() * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
pipe);
return;
}
I915_WRITE(WM1S_LP_ILK, sprite_wm);
 
/* Only IVB has two more LP watermarks for sprite */
if (!IS_IVYBRIDGE(dev))
return;
 
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
SNB_READ_WM2_LATENCY() * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
pipe);
return;
}
I915_WRITE(WM2S_LP_IVB, sprite_wm);
 
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
SNB_READ_WM3_LATENCY() * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
pipe);
return;
}
I915_WRITE(WM3S_LP_IVB, sprite_wm);
}
 
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
* Calculate watermark values for the various WM regs based on current mode
* and plane configuration.
*
* There are several cases to deal with here:
* - normal (i.e. non-self-refresh)
* - self-refresh (SR) mode
* - lines are large relative to FIFO size (buffer can hold up to 2)
* - lines are small relative to FIFO size (buffer can hold more than 2
* lines), so need to account for TLB latency
*
* The normal calculation is:
* watermark = dotclock * bytes per pixel * latency
* where latency is platform & configuration dependent (we assume pessimal
* values here).
*
* The SR calculation is:
* watermark = (trunc(latency/line time)+1) * surface width *
* bytes per pixel
* where
* line time = htotal / dotclock
* surface width = hdisplay for normal plane and 64 for cursor
* and latency is assumed to be high, as above.
*
* The final value programmed to the register should always be rounded up,
* and include an extra 2 entries to account for clock crossings.
*
* We don't use the sprite, so we can ignore that. And on Crestline we have
* to set the non-SR watermarks to 8.
*/
void intel_update_watermarks(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->display.update_wm)
dev_priv->display.update_wm(dev);
 
}
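
/* Illustrative note (not part of the driver): the SR formula from the
 * comment above with hypothetical numbers. At htotal = 2200 and a
 * 148.5 MHz dot clock, line time = 2200 / 148500 kHz ~ 14.8 us; with a
 * 12 us latency, trunc(12 / 14.8) + 1 = 1 line must be buffered, which
 * at hdisplay = 1920 and 4 bytes/pixel is 1920 * 4 = 7680 bytes of FIFO
 * needed to ride out the self-refresh exit latency.
 */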
 
void intel_update_linetime_watermarks(struct drm_device *dev,
int pipe, struct drm_display_mode *mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->display.update_linetime_wm)
dev_priv->display.update_linetime_wm(dev, pipe, mode);
}
 
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
uint32_t sprite_width, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->display.update_sprite_wm)
dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
pixel_size);
}
 
static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
struct drm_i915_gem_object *ctx;
int ret;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
ctx = i915_gem_alloc_object(dev, 4096);
if (!ctx) {
DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
return NULL;
}
 
ret = i915_gem_object_pin(ctx, 4096, true, false);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
}
 
ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
if (ret) {
DRM_ERROR("failed to set-domain on power context: %d\n", ret);
goto err_unpin;
}
 
return ctx;
 
err_unpin:
i915_gem_object_unpin(ctx);
err_unref:
drm_gem_object_unreference(&ctx->base);
mutex_unlock(&dev->struct_mutex);
return NULL;
}
 
/**
* Lock protecting IPS related data structures
*/
DEFINE_SPINLOCK(mchdev_lock);
 
/* Global for IPS driver to get at the current i915 device. Protected by
* mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;
 
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
 
assert_spin_locked(&mchdev_lock);
 
rgvswctl = I915_READ16(MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
DRM_DEBUG("gpu busy, RCS change rejected\n");
return false; /* still busy with another command */
}
 
rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
I915_WRITE16(MEMSWCTL, rgvswctl);
POSTING_READ16(MEMSWCTL);
 
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE16(MEMSWCTL, rgvswctl);
 
return true;
}
 
static void ironlake_enable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rgvmodectl = I915_READ(MEMMODECTL);
u8 fmax, fmin, fstart, vstart;
 
spin_lock_irq(&mchdev_lock);
 
/* Enable temp reporting */
I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
 
/* 100ms RC evaluation intervals */
I915_WRITE(RCUPEI, 100000);
I915_WRITE(RCDNEI, 100000);
 
/* Set max/min thresholds to 90ms and 80ms respectively */
I915_WRITE(RCBMAXAVG, 90000);
I915_WRITE(RCBMINAVG, 80000);
 
I915_WRITE(MEMIHYST, 1);
 
/* Set up min, max, and cur for interrupt handling */
fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
 
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
 
dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
dev_priv->ips.fstart = fstart;
 
dev_priv->ips.max_delay = fstart;
dev_priv->ips.min_delay = fmin;
dev_priv->ips.cur_delay = fstart;
 
DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
fmax, fmin, fstart);
 
I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 
/*
* Interrupts will be enabled in ironlake_irq_postinstall
*/
 
I915_WRITE(VIDSTART, vstart);
POSTING_READ(VIDSTART);
 
rgvmodectl |= MEMMODE_SWMODE_EN;
I915_WRITE(MEMMODECTL, rgvmodectl);
 
if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
DRM_ERROR("stuck trying to change perf mode\n");
mdelay(1);
 
ironlake_set_drps(dev, fstart);
 
dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
I915_READ(0x112e0);
dev_priv->ips.last_time1 = jiffies_to_msecs(GetTimerTicks());
dev_priv->ips.last_count2 = I915_READ(0x112f4);
// getrawmonotonic(&dev_priv->ips.last_time2);
 
spin_unlock_irq(&mchdev_lock);
}
 
static void ironlake_disable_drps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u16 rgvswctl;
 
spin_lock_irq(&mchdev_lock);
 
rgvswctl = I915_READ16(MEMSWCTL);
 
/* Ack interrupts, disable EFC interrupt */
I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
I915_WRITE(DEIIR, DE_PCU_EVENT);
I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
 
/* Go back to the starting frequency */
ironlake_set_drps(dev, dev_priv->ips.fstart);
mdelay(1);
rgvswctl |= MEMCTL_CMD_STS;
I915_WRITE(MEMSWCTL, rgvswctl);
mdelay(1);
 
spin_unlock_irq(&mchdev_lock);
}
 
/* There's a funny hw issue where the hw returns all 0 when reading from
* GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
* ourselves, instead of doing a rmw cycle (which might result in us clearing
* all limits and the gpu getting stuck at whatever frequency it currently
* runs at).
*/
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
{
u32 limits;
 
limits = 0;
 
if (*val >= dev_priv->rps.max_delay)
*val = dev_priv->rps.max_delay;
limits |= dev_priv->rps.max_delay << 24;
 
/* Only set the down limit when we've reached the lowest level to avoid
* getting more interrupts, otherwise leave this clear. This prevents a
* race in the hw when coming out of rc6: There's a tiny window where
* the hw runs at the minimal clock before selecting the desired
* frequency, if the down threshold expires in that window we will not
* receive a down interrupt. */
if (*val <= dev_priv->rps.min_delay) {
*val = dev_priv->rps.min_delay;
limits |= dev_priv->rps.min_delay << 16;
}
 
return limits;
}
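
/* Illustrative note (not part of the driver): the returned value packs the
 * clamped limits into the GEN6_RP_INTERRUPT_LIMITS layout, max delay in
 * bits 31:24 and, only once the floor is reached, min delay in bits 23:16.
 * E.g. max_delay = 0x16, min_delay = 0x07 and *val = 0x07 on entry gives
 * limits = (0x16 << 24) | (0x07 << 16) = 0x16070000.
 */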
 
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 limits = gen6_rps_limits(dev_priv, &val);
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
 
if (val == dev_priv->rps.cur_delay)
return;
 
I915_WRITE(GEN6_RPNSWREQ,
GEN6_FREQUENCY(val) |
GEN6_OFFSET(0) |
GEN6_AGGRESSIVE_TURBO);
 
/* Make sure we continue to get interrupts
* until we hit the minimum or maximum frequencies.
*/
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
 
POSTING_READ(GEN6_RPNSWREQ);
 
dev_priv->rps.cur_delay = val;
 
trace_intel_gpu_freq_change(val * 50);
}
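
/* Illustrative note (not part of the driver): rps delay values step in
 * 50 MHz units, which is why the tracepoint above reports val * 50; a
 * val of 22 therefore corresponds to a 1100 MHz GPU clock request.
 */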
 
static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(GEN6_RC_CONTROL, 0);
I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, 0);
/* Complete PM interrupt masking here doesn't race with the rps work
* item again unmasking PM interrupts because that is using a different
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
spin_lock_irq(&dev_priv->rps.lock);
dev_priv->rps.pm_iir = 0;
spin_unlock_irq(&dev_priv->rps.lock);
 
I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
}
 
int intel_enable_rc6(const struct drm_device *dev)
{
/* Respect the kernel parameter if it is set */
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
 
if (INTEL_INFO(dev)->gen == 5) {
#ifdef CONFIG_INTEL_IOMMU
/* Disable rc6 on ilk if VT-d is on. */
if (intel_iommu_gfx_mapped)
return false;
#endif
DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
return INTEL_RC6_ENABLE;
}
 
if (IS_HASWELL(dev)) {
DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
return INTEL_RC6_ENABLE;
}
 
/* snb/ivb have more than one rc6 state. */
if (INTEL_INFO(dev)->gen == 6) {
DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
return INTEL_RC6_ENABLE;
}
 
DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
 
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
u32 rp_state_cap;
u32 gt_perf_status;
u32 pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
int i;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
*
* Perhaps there might be some value in exposing these to
* userspace...
*/
I915_WRITE(GEN6_RC_STATE, 0);
 
/* Clear the DBG now so we don't confuse earlier errors */
if ((gtfifodbg = I915_READ(GTFIFODBG))) {
DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
I915_WRITE(GTFIFODBG, gtfifodbg);
}
 
gen6_gt_force_wake_get(dev_priv);
 
rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
/* In units of 100MHz */
dev_priv->rps.max_delay = rp_state_cap & 0xff;
dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
dev_priv->rps.cur_delay = 0;
 
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
 
I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
for_each_ring(ring, dev_priv, i)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
/* Check if we are enabling RC6 */
rc6_mode = intel_enable_rc6(dev_priv->dev);
if (rc6_mode & INTEL_RC6_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
 
/* We don't use those on Haswell */
if (!IS_HASWELL(dev)) {
if (rc6_mode & INTEL_RC6p_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
 
if (rc6_mode & INTEL_RC6pp_ENABLE)
rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
}
 
DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
(rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
(rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
(rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
 
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
GEN6_RC_CTL_EI_MODE(1) |
GEN6_RC_CTL_HW_ENABLE);
 
I915_WRITE(GEN6_RPNSWREQ,
GEN6_FREQUENCY(10) |
GEN6_OFFSET(0) |
GEN6_AGGRESSIVE_TURBO);
I915_WRITE(GEN6_RC_VIDEO_FREQ,
GEN6_FREQUENCY(12));
 
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
dev_priv->rps.max_delay << 24 |
dev_priv->rps.min_delay << 16);
 
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
I915_WRITE(GEN6_RP_UP_EI, 66000);
I915_WRITE(GEN6_RP_DOWN_EI, 350000);
 
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
(IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
 
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
 
I915_WRITE(GEN6_PCODE_DATA, 0);
I915_WRITE(GEN6_PCODE_MAILBOX,
GEN6_PCODE_READY |
GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 
/* Check for overclock support */
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
pcu_mbox = I915_READ(GEN6_PCODE_DATA);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
dev_priv->rps.max_delay = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
 
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
spin_lock_irq(&dev_priv->rps.lock);
WARN_ON(dev_priv->rps.pm_iir != 0);
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps.lock);
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
 
gen6_gt_force_wake_put(dev_priv);
}
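 
/* A minimal sketch, not part of the original file, of the pcode mailbox
 * handshake used three times in gen6_enable_rps() above: wait for the
 * READY bit to clear, write the payload, trigger the command, then wait
 * for pcode to clear READY again. The helper name and the -ETIMEDOUT
 * returns are illustrative assumptions only. */
#if 0
static int gen6_pcode_write_sketch(struct drm_i915_private *dev_priv,
				   u32 mbox, u32 val)
{
	/* Mailbox must be idle before we can submit a command */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500))
		return -ETIMEDOUT;

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	/* pcode clears READY once it has consumed the command */
	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500))
		return -ETIMEDOUT;

	return 0;
}
#endif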
 
#if 0
static void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
int gpu_freq, ia_freq, max_ia_freq;
int scaling_factor = 180;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
max_ia_freq = cpufreq_quick_get_max(0);
/*
* Default to measured freq if none found, PCU will ensure we don't go
* over
*/
if (!max_ia_freq)
max_ia_freq = tsc_khz;
 
/* Convert from kHz to MHz */
max_ia_freq /= 1000;
 
/*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
*/
for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
gpu_freq--) {
int diff = dev_priv->rps.max_delay - gpu_freq;
 
/*
* For GPU frequencies less than 750MHz, just use the lowest
* ring freq.
*/
if (gpu_freq < min_freq)
ia_freq = 800;
else
ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
 
I915_WRITE(GEN6_PCODE_DATA,
(ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
gpu_freq);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
GEN6_PCODE_READY) == 0, 10)) {
DRM_ERROR("pcode write of freq table timed out\n");
continue;
}
}
}
#endif
 
void ironlake_teardown_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->renderctx) {
i915_gem_object_unpin(dev_priv->renderctx);
drm_gem_object_unreference(&dev_priv->renderctx->base);
dev_priv->renderctx = NULL;
}
 
if (dev_priv->pwrctx) {
i915_gem_object_unpin(dev_priv->pwrctx);
drm_gem_object_unreference(&dev_priv->pwrctx->base);
dev_priv->pwrctx = NULL;
}
}
 
static void ironlake_disable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (I915_READ(PWRCTXA)) {
/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
50);
 
I915_WRITE(PWRCTXA, 0);
POSTING_READ(PWRCTXA);
 
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
POSTING_READ(RSTDBYCTL);
}
}
 
static int ironlake_setup_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->renderctx == NULL)
dev_priv->renderctx = intel_alloc_context_page(dev);
if (!dev_priv->renderctx)
return -ENOMEM;
 
if (dev_priv->pwrctx == NULL)
dev_priv->pwrctx = intel_alloc_context_page(dev);
if (!dev_priv->pwrctx) {
ironlake_teardown_rc6(dev);
return -ENOMEM;
}
 
return 0;
}
 
static void ironlake_enable_rc6(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
 
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
if (!intel_enable_rc6(dev))
return;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
ret = ironlake_setup_rc6(dev);
if (ret)
return;
 
/*
* GPU can automatically power down the render unit if given a page
* to save state.
*/
ret = intel_ring_begin(ring, 6);
if (ret) {
ironlake_teardown_rc6(dev);
return;
}
 
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
MI_RESTORE_INHIBIT);
intel_ring_emit(ring, MI_SUSPEND_FLUSH);
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_FLUSH);
intel_ring_advance(ring);
 
/*
 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
 * does an implicit flush; combined with the MI_FLUSH above, it should
 * be safe to assume that renderctx is valid.
 */
ret = intel_wait_ring_idle(ring);
if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n");
ironlake_teardown_rc6(dev);
return;
}
 
I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
 
static unsigned long intel_pxfreq(u32 vidfreq)
{
unsigned long freq;
int div = (vidfreq & 0x3f0000) >> 16;
int post = (vidfreq & 0x3000) >> 12;
int pre = (vidfreq & 0x7);
 
if (!pre)
return 0;
 
freq = ((div * 133333) / ((1<<post) * pre));
 
return freq;
}
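 
/* Worked example with a hypothetical register value: vidfreq = 0x00140001
 * decodes to div = 0x14 (20), post = 0, pre = 1, so
 * freq = (20 * 133333) / ((1 << 0) * 1) = 2666660, i.e. the 133.333 MHz
 * reference scaled by the divider fields. */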
 
static const struct cparams {
u16 i;
u16 t;
u16 m;
u16 c;
} cparams[] = {
{ 1, 1333, 301, 28664 },
{ 1, 1066, 294, 24460 },
{ 1, 800, 294, 25192 },
{ 0, 1333, 276, 27605 },
{ 0, 1066, 276, 27605 },
{ 0, 800, 231, 23784 },
};
 
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
u64 total_count, diff, ret;
u32 count1, count2, count3, m = 0, c = 0;
unsigned long now = jiffies_to_msecs(GetTimerTicks()), diff1;
int i;
 
assert_spin_locked(&mchdev_lock);
 
diff1 = now - dev_priv->ips.last_time1;
 
/* Prevent division-by-zero if we are asking too fast.
* Also, we don't get interesting results if we are polling
* faster than once in 10ms, so just return the saved value
* in such cases.
*/
if (diff1 <= 10)
return dev_priv->ips.chipset_power;
 
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
count3 = I915_READ(CSIEC);
 
total_count = count1 + count2 + count3;
 
/* FIXME: handle per-counter overflow */
if (total_count < dev_priv->ips.last_count1) {
diff = ~0UL - dev_priv->ips.last_count1;
diff += total_count;
} else {
diff = total_count - dev_priv->ips.last_count1;
}
 
for (i = 0; i < ARRAY_SIZE(cparams); i++) {
if (cparams[i].i == dev_priv->ips.c_m &&
cparams[i].t == dev_priv->ips.r_t) {
m = cparams[i].m;
c = cparams[i].c;
break;
}
}
 
diff = div_u64(diff, diff1);
ret = ((m * diff) + c);
ret = div_u64(ret, 10);
 
dev_priv->ips.last_count1 = total_count;
dev_priv->ips.last_time1 = now;
 
dev_priv->ips.chipset_power = ret;
 
return ret;
}
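 
/* Note: the wrap handling above (and the identical pattern in
 * __i915_update_gfx_val() below) could also be written as the single
 * unsigned-subtraction idiom (u32)(total_count - last_count1), which
 * yields the exact wrapped delta; the explicit ~0UL form under-counts
 * by one whenever the counter has wrapped. */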
 
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
unsigned long val;
 
if (dev_priv->info->gen != 5)
return 0;
 
spin_lock_irq(&mchdev_lock);
 
val = __i915_chipset_val(dev_priv);
 
spin_unlock_irq(&mchdev_lock);
 
return val;
}
 
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
unsigned long m, x, b;
u32 tsfs;
 
tsfs = I915_READ(TSFS);
 
m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
x = I915_READ8(TR1);
 
b = tsfs & TSFS_INTR_MASK;
 
return ((m * x) / 127) - b;
}
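 
/* Worked example with hypothetical register contents: a TSFS slope field
 * of m = 12, a TR1 reading of x = 100 and an intercept of b = 3 give
 * (12 * 100) / 127 - 3 = 9 - 3 = 6, in the opaque calibration units the
 * IPS heuristics consume. */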
 
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
static const struct v_table {
u16 vd; /* in .1 mil */
u16 vm; /* in .1 mil */
} v_table[] = {
{ 0, 0, },
{ 375, 0, },
{ 500, 0, },
{ 625, 0, },
{ 750, 0, },
{ 875, 0, },
{ 1000, 0, },
{ 1125, 0, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4125, 3000, },
{ 4250, 3125, },
{ 4375, 3250, },
{ 4500, 3375, },
{ 4625, 3500, },
{ 4750, 3625, },
{ 4875, 3750, },
{ 5000, 3875, },
{ 5125, 4000, },
{ 5250, 4125, },
{ 5375, 4250, },
{ 5500, 4375, },
{ 5625, 4500, },
{ 5750, 4625, },
{ 5875, 4750, },
{ 6000, 4875, },
{ 6125, 5000, },
{ 6250, 5125, },
{ 6375, 5250, },
{ 6500, 5375, },
{ 6625, 5500, },
{ 6750, 5625, },
{ 6875, 5750, },
{ 7000, 5875, },
{ 7125, 6000, },
{ 7250, 6125, },
{ 7375, 6250, },
{ 7500, 6375, },
{ 7625, 6500, },
{ 7750, 6625, },
{ 7875, 6750, },
{ 8000, 6875, },
{ 8125, 7000, },
{ 8250, 7125, },
{ 8375, 7250, },
{ 8500, 7375, },
{ 8625, 7500, },
{ 8750, 7625, },
{ 8875, 7750, },
{ 9000, 7875, },
{ 9125, 8000, },
{ 9250, 8125, },
{ 9375, 8250, },
{ 9500, 8375, },
{ 9625, 8500, },
{ 9750, 8625, },
{ 9875, 8750, },
{ 10000, 8875, },
{ 10125, 9000, },
{ 10250, 9125, },
{ 10375, 9250, },
{ 10500, 9375, },
{ 10625, 9500, },
{ 10750, 9625, },
{ 10875, 9750, },
{ 11000, 9875, },
{ 11125, 10000, },
{ 11250, 10125, },
{ 11375, 10250, },
{ 11500, 10375, },
{ 11625, 10500, },
{ 11750, 10625, },
{ 11875, 10750, },
{ 12000, 10875, },
{ 12125, 11000, },
{ 12250, 11125, },
{ 12375, 11250, },
{ 12500, 11375, },
{ 12625, 11500, },
{ 12750, 11625, },
{ 12875, 11750, },
{ 13000, 11875, },
{ 13125, 12000, },
{ 13250, 12125, },
{ 13375, 12250, },
{ 13500, 12375, },
{ 13625, 12500, },
{ 13750, 12625, },
{ 13875, 12750, },
{ 14000, 12875, },
{ 14125, 13000, },
{ 14250, 13125, },
{ 14375, 13250, },
{ 14500, 13375, },
{ 14625, 13500, },
{ 14750, 13625, },
{ 14875, 13750, },
{ 15000, 13875, },
{ 15125, 14000, },
{ 15250, 14125, },
{ 15375, 14250, },
{ 15500, 14375, },
{ 15625, 14500, },
{ 15750, 14625, },
{ 15875, 14750, },
{ 16000, 14875, },
{ 16125, 15000, },
};
if (dev_priv->info->is_mobile)
return v_table[pxvid].vm;
else
return v_table[pxvid].vd;
}
 
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
struct timespec now, diff1;
u64 diff;
unsigned long diffms;
u32 count;
 
assert_spin_locked(&mchdev_lock);
 
getrawmonotonic(&now);
diff1 = timespec_sub(now, dev_priv->ips.last_time2);
 
/* Don't divide by 0 */
diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
if (!diffms)
return;
 
count = I915_READ(GFXEC);
 
if (count < dev_priv->ips.last_count2) {
diff = ~0UL - dev_priv->ips.last_count2;
diff += count;
} else {
diff = count - dev_priv->ips.last_count2;
}
 
dev_priv->ips.last_count2 = count;
dev_priv->ips.last_time2 = now;
 
/* More magic constants... */
diff = diff * 1181;
diff = div_u64(diff, diffms * 10);
dev_priv->ips.gfx_power = diff;
}
 
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
if (dev_priv->info->gen != 5)
return;
 
spin_lock_irq(&mchdev_lock);
 
__i915_update_gfx_val(dev_priv);
 
spin_unlock_irq(&mchdev_lock);
}
 
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
unsigned long t, corr, state1, corr2, state2;
u32 pxvid, ext_v;
 
assert_spin_locked(&mchdev_lock);
 
pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
 
state1 = ext_v;
 
t = i915_mch_val(dev_priv);
 
/* Revel in the empirically derived constants */
 
/* Correction factor in 1/100000 units */
if (t > 80)
corr = ((t * 2349) + 135940);
else if (t >= 50)
corr = ((t * 964) + 29317);
else /* < 50 */
corr = ((t * 301) + 1004);
 
corr = corr * ((150142 * state1) / 10000 - 78642);
corr /= 100000;
corr2 = (corr * dev_priv->ips.corr);
 
state2 = (corr2 * state1) / 10000;
state2 /= 100; /* convert to mW */
 
__i915_update_gfx_val(dev_priv);
 
return dev_priv->ips.gfx_power + state2;
}
 
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
unsigned long val;
 
if (dev_priv->info->gen != 5)
return 0;
 
spin_lock_irq(&mchdev_lock);
 
val = __i915_gfx_val(dev_priv);
 
spin_unlock_irq(&mchdev_lock);
 
return val;
}
 
/**
* i915_read_mch_val - return value for IPS use
*
* Calculate and return a value for the IPS driver to use when deciding whether
* we have thermal and power headroom to increase CPU or GPU power budget.
*/
unsigned long i915_read_mch_val(void)
{
struct drm_i915_private *dev_priv;
unsigned long chipset_val, graphics_val, ret = 0;
 
spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
 
chipset_val = __i915_chipset_val(dev_priv);
graphics_val = __i915_gfx_val(dev_priv);
 
ret = chipset_val + graphics_val;
 
out_unlock:
spin_unlock_irq(&mchdev_lock);
 
return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
 
/**
* i915_gpu_raise - raise GPU frequency limit
*
* Raise the limit; IPS indicates we have thermal headroom.
*/
bool i915_gpu_raise(void)
{
struct drm_i915_private *dev_priv;
bool ret = true;
 
spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
 
if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
dev_priv->ips.max_delay--;
 
out_unlock:
spin_unlock_irq(&mchdev_lock);
 
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
 
/**
* i915_gpu_lower - lower GPU frequency limit
*
* IPS indicates we're close to a thermal limit, so throttle back the GPU
* frequency maximum.
*/
bool i915_gpu_lower(void)
{
struct drm_i915_private *dev_priv;
bool ret = true;
 
spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
 
if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
dev_priv->ips.max_delay++;
 
out_unlock:
spin_unlock_irq(&mchdev_lock);
 
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
 
/**
* i915_gpu_busy - indicate GPU business to IPS
*
* Tell the IPS driver whether or not the GPU is busy.
*/
bool i915_gpu_busy(void)
{
struct drm_i915_private *dev_priv;
struct intel_ring_buffer *ring;
bool ret = false;
int i;
 
spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev)
goto out_unlock;
dev_priv = i915_mch_dev;
 
for_each_ring(ring, dev_priv, i)
ret |= !list_empty(&ring->request_list);
 
out_unlock:
spin_unlock_irq(&mchdev_lock);
 
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
 
/**
* i915_gpu_turbo_disable - disable graphics turbo
*
* Disable graphics turbo by resetting the max frequency and setting the
* current frequency to the default.
*/
bool i915_gpu_turbo_disable(void)
{
struct drm_i915_private *dev_priv;
bool ret = true;
 
spin_lock_irq(&mchdev_lock);
if (!i915_mch_dev) {
ret = false;
goto out_unlock;
}
dev_priv = i915_mch_dev;
 
dev_priv->ips.max_delay = dev_priv->ips.fstart;
 
if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
ret = false;
 
out_unlock:
spin_unlock_irq(&mchdev_lock);
 
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
 
/**
* Tells the intel_ips driver that the i915 driver is now loaded, if
* IPS got loaded first.
*
* This awkward dance is so that neither module has to depend on the
* other in order for IPS to do the appropriate communication of
* GPU turbo limits to i915.
*/
static void
ips_ping_for_i915_load(void)
{
void (*link)(void);
 
// link = symbol_get(ips_link_to_i915_driver);
// if (link) {
// link();
// symbol_put(ips_link_to_i915_driver);
// }
}
 
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
/* We only register the i915 ips part with intel-ips once everything is
* set up, to avoid intel-ips sneaking in and reading bogus values. */
spin_lock_irq(&mchdev_lock);
i915_mch_dev = dev_priv;
spin_unlock_irq(&mchdev_lock);
 
ips_ping_for_i915_load();
}
 
void intel_gpu_ips_teardown(void)
{
spin_lock_irq(&mchdev_lock);
i915_mch_dev = NULL;
spin_unlock_irq(&mchdev_lock);
}
static void intel_init_emon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lcfuse;
u8 pxw[16];
int i;
 
/* Disable to program */
I915_WRITE(ECR, 0);
POSTING_READ(ECR);
 
/* Program energy weights for various events */
I915_WRITE(SDEW, 0x15040d00);
I915_WRITE(CSIEW0, 0x007f0000);
I915_WRITE(CSIEW1, 0x1e220004);
I915_WRITE(CSIEW2, 0x04000004);
 
for (i = 0; i < 5; i++)
I915_WRITE(PEW + (i * 4), 0);
for (i = 0; i < 3; i++)
I915_WRITE(DEW + (i * 4), 0);
 
/* Program P-state weights to account for frequency power adjustment */
for (i = 0; i < 16; i++) {
u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
unsigned long freq = intel_pxfreq(pxvidfreq);
unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
unsigned long val;
 
val = vid * vid;
val *= (freq / 1000);
val *= 255;
val /= (127*127*900);
if (val > 0xff)
DRM_ERROR("bad pxval: %ld\n", val);
pxw[i] = val;
}
/* Render standby states get 0 weight */
pxw[14] = 0;
pxw[15] = 0;
 
for (i = 0; i < 4; i++) {
u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
I915_WRITE(PXW + (i * 4), val);
}
 
/* Adjust magic regs to magic values (more experimental results) */
I915_WRITE(OGW0, 0);
I915_WRITE(OGW1, 0);
I915_WRITE(EG0, 0x00007f00);
I915_WRITE(EG1, 0x0000000e);
I915_WRITE(EG2, 0x000e0000);
I915_WRITE(EG3, 0x68000300);
I915_WRITE(EG4, 0x42000000);
I915_WRITE(EG5, 0x00140031);
I915_WRITE(EG6, 0);
I915_WRITE(EG7, 0);
 
for (i = 0; i < 8; i++)
I915_WRITE(PXWL + (i * 4), 0);
 
/* Enable PMON + select events */
I915_WRITE(ECR, 0x80000019);
 
lcfuse = I915_READ(LCFUSE02);
 
dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
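 
/* Worked example for the weight formula in the loop above, with
 * hypothetical inputs vid = 64 and freq = 400000:
 * val = 64*64 * (400000/1000) * 255 / (127*127*900)
 *     = 4096 * 400 * 255 / 14516100 ~= 28,
 * comfortably inside the 8-bit range that the error check guards. */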
 
void intel_disable_gt_powersave(struct drm_device *dev)
{
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
gen6_disable_rps(dev);
}
}
 
void intel_enable_gt_powersave(struct drm_device *dev)
{
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
// gen6_enable_rps(dev);
// gen6_update_ring_freq(dev);
}
}
 
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 
/* Required for FBC */
dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
DPFCRUNIT_CLOCK_GATE_DISABLE |
DPFDUNIT_CLOCK_GATE_DISABLE;
/* Required for CxSR */
dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
 
I915_WRITE(PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
SVSMUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
/*
* According to the spec the following bits should be set in
* order to enable memory self-refresh
* The bit 22/21 of 0x42004
* The bit 5 of 0x42020
* The bit 15 of 0x45000
*/
I915_WRITE(ILK_DISPLAY_CHICKEN2,
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
I915_WRITE(ILK_DSPCLK_GATE,
(I915_READ(ILK_DSPCLK_GATE) |
ILK_DPARB_CLK_GATE));
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
/*
* Based on the document from hardware guys the following bits
* should be set unconditionally in order to enable FBC.
* The bit 22 of 0x42000
* The bit 22 of 0x42004
* The bit 7,8,9 of 0x42020.
*/
if (IS_IRONLAKE_M(dev)) {
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
I915_WRITE(ILK_DSPCLK_GATE,
I915_READ(ILK_DSPCLK_GATE) |
ILK_DPFC_DIS1 |
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
}
 
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
I915_WRITE(_3D_CHICKEN2,
_3D_CHICKEN2_WM_READ_PIPELINED << 16 |
_3D_CHICKEN2_WM_READ_PIPELINED);
}
 
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 
I915_WRITE(GEN6_UCGCTL1,
I915_READ(GEN6_UCGCTL1) |
GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
GEN6_CSUNIT_CLOCK_GATE_DISABLE);
 
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
* some amount of runtime in the Mesa "fire" demo, and Unigine
* Sanctuary and Tropics, and apparently anything else with
* alpha test or pixel discard.
*
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*
* Also apply WaDisableVDSUnitClockGating and
* WaDisableRCPBUnitClockGating.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
/* Bspec says we need to always set all mask bits. */
I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
 
/*
* According to the spec the following bits should be
* set in order to enable memory self-refresh and fbc:
* The bit21 and bit22 of 0x42000
* The bit21 and bit22 of 0x42004
* The bit5 and bit7 of 0x42020
* The bit14 of 0x70180
* The bit14 of 0x71180
*/
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
I915_WRITE(ILK_DSPCLK_GATE,
I915_READ(ILK_DSPCLK_GATE) |
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
 
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
 
/* The default value should be 0x200 according to docs, but the two
* platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
}
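 
/* The _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() writes above target
 * "masked" registers, where the upper 16 bits select which of the lower
 * 16 bits the write may change. The macros (from i915_reg.h) expand to
 *
 *   #define _MASKED_BIT_ENABLE(a)  (((a) << 16) | (a))
 *   #define _MASKED_BIT_DISABLE(a) ((a) << 16)
 *
 * so a single write can flip one bit while leaving the rest untouched,
 * without a read-modify-write cycle. */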
 
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
 
reg &= ~GEN7_FF_SCHED_MASK;
reg |= GEN7_FF_TS_SCHED_HW;
reg |= GEN7_FF_VS_SCHED_HW;
reg |= GEN7_FF_DS_SCHED_HW;
 
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
 
static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating workaround.
*/
I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
I915_WRITE(GEN7_L3CNTLREG1,
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
GEN7_WA_L3_CHICKEN_MODE);
 
/* This is required by WaCatErrorRejectionIssue */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
 
gen7_setup_fixed_func_scheduler(dev_priv);
 
/* WaDisable4x2SubspanOptimization */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
/* XXX: This is a workaround for early silicon revisions and should be
* removed later.
*/
I915_WRITE(WM_DBG,
I915_READ(WM_DBG) |
WM_DBG_DISALLOW_MULTIPLE_LP |
WM_DBG_DISALLOW_SPRITE |
WM_DBG_DISALLOW_MAXFIFO);
}
 
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
uint32_t snpcr;
 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
I915_WRITE(GEN7_L3CNTLREG1,
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
GEN7_WA_L3_CHICKEN_MODE);
 
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
* some amount of runtime in the Mesa "fire" demo, and Unigine
* Sanctuary and Tropics, and apparently anything else with
* alpha test or pixel discard.
*
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating workaround.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
/* This is required by WaCatErrorRejectionIssue */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
 
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
gen7_setup_fixed_func_scheduler(dev_priv);
 
/* WaDisable4x2SubspanOptimization */
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
}
 
static void valleyview_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
 
/* This is required by WaCatErrorRejectionIssue */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
 
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
* some amount of runtime in the Mesa "fire" demo, and Unigine
* Sanctuary and Tropics, and apparently anything else with
* alpha test or pixel discard.
*
* According to the spec, bit 11 (RCCUNIT) must also be set,
* but we didn't debug actual testcases to find it out.
*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating workaround.
*
* Also apply WaDisableVDSUnitClockGating and
* WaDisableRCPBUnitClockGating.
*/
I915_WRITE(GEN6_UCGCTL2,
GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
 
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
 
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
/*
* On ValleyView, the GUnit needs to signal the GT
* when flip and other events complete. So enable
* all the GUnit->GT interrupts here
*/
I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
PLANEA_FLIPDONE_INT_EN);
}
 
static void g4x_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dspclk_gate;
 
I915_WRITE(RENCLK_GATE_D1, 0);
I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
GS_UNIT_CLOCK_GATE_DISABLE |
CL_UNIT_CLOCK_GATE_DISABLE);
I915_WRITE(RAMCLK_GATE_D, 0);
dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
OVRUNIT_CLOCK_GATE_DISABLE |
OVCUNIT_CLOCK_GATE_DISABLE;
if (IS_GM45(dev))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
}
 
static void crestline_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(DSPCLK_GATE_D, 0);
I915_WRITE(RAMCLK_GATE_D, 0);
I915_WRITE16(DEUC, 0);
}
 
static void broadwater_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
I965_RCC_CLOCK_GATE_DISABLE |
I965_RCPB_CLOCK_GATE_DISABLE |
I965_ISC_CLOCK_GATE_DISABLE |
I965_FBC_CLOCK_GATE_DISABLE);
I915_WRITE(RENCLK_GATE_D2, 0);
}
 
static void gen3_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dstate = I915_READ(D_STATE);
 
dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
DSTATE_DOT_CLOCK_GATING;
I915_WRITE(D_STATE, dstate);
 
if (IS_PINEVIEW(dev))
I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
 
/* IIR "flip pending" means done if this bit is set */
I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
}
 
static void i85x_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}
 
static void i830_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
 
static void ibx_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
 
static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
/* Without this, mode sets may fail silently on FDI */
for_each_pipe(pipe)
I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
 
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->display.init_clock_gating(dev);
 
if (dev_priv->display.init_pch_clock_gating)
dev_priv->display.init_pch_clock_gating(dev);
}
 
/* Starting with Haswell, we have different power wells for
* different parts of the GPU. This attempts to enable them all.
*/
void intel_init_power_wells(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long power_wells[] = {
HSW_PWR_WELL_CTL1,
HSW_PWR_WELL_CTL2,
HSW_PWR_WELL_CTL4
};
int i;
 
if (!IS_HASWELL(dev))
return;
 
mutex_lock(&dev->struct_mutex);
 
for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
int well = I915_READ(power_wells[i]);
 
if ((well & HSW_PWR_WELL_STATE) == 0) {
I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
if (wait_for(I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE, 20))
DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
}
}
 
mutex_unlock(&dev->struct_mutex);
}
 
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
} else if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
} else if (IS_CRESTLINE(dev)) {
dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
dev_priv->display.enable_fbc = i8xx_enable_fbc;
dev_priv->display.disable_fbc = i8xx_disable_fbc;
}
/* 855GM needs testing */
}
 
/* For cxsr */
if (IS_PINEVIEW(dev))
i915_pineview_get_mem_freq(dev);
else if (IS_GEN5(dev))
i915_ironlake_get_mem_freq(dev);
 
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
 
if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
dev_priv->display.update_wm = ironlake_update_wm;
else {
DRM_DEBUG_KMS("Failed to get proper latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
} else if (IS_GEN6(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
} else if (IS_HASWELL(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
dev_priv->display.init_clock_gating = haswell_init_clock_gating;
} else
dev_priv->display.update_wm = NULL;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.update_wm = valleyview_update_wm;
dev_priv->display.init_clock_gating =
valleyview_init_clock_gating;
} else if (IS_PINEVIEW(dev)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
DRM_INFO("failed to find known CxSR latency "
"(found ddr%s fsb freq %d, mem freq %d), "
"disabling CxSR\n",
(dev_priv->is_ddr3 == 1) ? "3" : "2",
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
pineview_disable_cxsr(dev);
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_G4X(dev)) {
dev_priv->display.update_wm = g4x_update_wm;
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
} else if (IS_GEN4(dev)) {
dev_priv->display.update_wm = i965_update_wm;
if (IS_CRESTLINE(dev))
dev_priv->display.init_clock_gating = crestline_init_clock_gating;
else if (IS_BROADWATER(dev))
dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
} else if (IS_GEN3(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
} else if (IS_I865G(dev)) {
dev_priv->display.update_wm = i830_update_wm;
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
dev_priv->display.get_fifo_size = i830_get_fifo_size;
} else if (IS_I85X(dev)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i85x_get_fifo_size;
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
} else {
dev_priv->display.update_wm = i830_update_wm;
dev_priv->display.init_clock_gating = i830_init_clock_gating;
if (IS_845G(dev))
dev_priv->display.get_fifo_size = i845_get_fifo_size;
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
}
 
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
u32 gt_thread_status_mask;
 
if (IS_HASWELL(dev_priv->dev))
gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
else
gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
 
/* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
DRM_ERROR("GT thread status wait timed out\n");
}
 
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
 
if (IS_HASWELL(dev_priv->dev))
forcewake_ack = FORCEWAKE_ACK_HSW;
else
forcewake_ack = FORCEWAKE_ACK;
 
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
I915_WRITE_NOTRACE(FORCEWAKE, 1);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
 
if (IS_HASWELL(dev_priv->dev))
forcewake_ack = FORCEWAKE_ACK_HSW;
else
forcewake_ack = FORCEWAKE_MT_ACK;
 
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
* be called at the beginning of the sequence followed by a call to
* gen6_gt_force_wake_put() at the end of the sequence.
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
if (dev_priv->forcewake_count++ == 0)
dev_priv->gt.force_wake_get(dev_priv);
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
 
void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
u32 gtfifodbg;
gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
"MMIO read or write has been dropped %x\n", gtfifodbg))
I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}
 
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
 
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
 
/*
* see gen6_gt_force_wake_get()
*/
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
unsigned long irqflags;
 
spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
if (--dev_priv->forcewake_count == 0)
dev_priv->gt.force_wake_put(dev_priv);
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
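 
/* A minimal usage sketch, illustrative only, of the get/put protocol
 * described above gen6_gt_force_wake_get(): bracket a multi-register
 * read sequence so the GT cannot power down between the reads. The
 * helper name read_rpstat_pair() is a hypothetical example. */
#if 0
static void read_rpstat_pair(struct drm_i915_private *dev_priv,
			     u32 *cap, u32 *status)
{
	gen6_gt_force_wake_get(dev_priv);
	*cap = I915_READ(GEN6_RP_STATE_CAP);
	*status = I915_READ(GEN6_GT_PERF_STATUS);
	gen6_gt_force_wake_put(dev_priv);
}
#endif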
 
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
int ret = 0;
 
if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
int loop = 500;
u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
udelay(10);
fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
}
if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
++ret;
dev_priv->gt_fifo_count = fifo;
}
dev_priv->gt_fifo_count--;
 
return ret;
}
 
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
 
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
 
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
/* The below doubles as a POSTING_READ */
gen6_gt_check_fifodbg(dev_priv);
}
 
void intel_gt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
spin_lock_init(&dev_priv->gt_lock);
 
if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
} else if (INTEL_INFO(dev)->gen >= 6) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
 
/* IVB configs may use multi-threaded forcewake */
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
u32 ecobus;
 
/* A small trick here - if the bios hasn't configured
* MT forcewake, and if the device is in RC6, then
* force_wake_mt_get will not wake the device and the
* ECOBUS read will return zero. Which will be
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv);
ecobus = I915_READ_NOTRACE(ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
 
if (ecobus & FORCEWAKE_MT_ENABLE) {
DRM_DEBUG_KMS("Using MT version of forcewake\n");
dev_priv->gt.force_wake_get =
__gen6_gt_force_wake_mt_get;
dev_priv->gt.force_wake_put =
__gen6_gt_force_wake_mt_put;
}
}
}
}
 
/drivers/video/drm/i915/intel_ringbuffer.c
29,10 → 29,9
#define iowrite32(v, addr) writel((v), (addr))
#define ioread32(addr) readl(addr)
 
#include "drmP.h"
#include "drm.h"
#include <drm/drmP.h>
#include "i915_drv.h"
#include "i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"
 
54,22 → 53,34
return space;
}
 
static u32 i915_gem_get_seqno(struct drm_device *dev)
static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 seqno;
u32 cmd;
int ret;
 
seqno = dev_priv->next_seqno;
cmd = MI_FLUSH;
if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
cmd |= MI_NO_WRITE_FLUSH;
 
/* reserve 0 for non-seqno */
if (++dev_priv->next_seqno == 0)
dev_priv->next_seqno = 1;
if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
cmd |= MI_READ_FLUSH;
 
return seqno;
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
 
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
 
return 0;
}
 
static int
render_ring_flush(struct intel_ring_buffer *ring,
gen4_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
106,17 → 117,8
*/
 
cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
if ((invalidate_domains|flush_domains) &
I915_GEM_DOMAIN_RENDER)
if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
cmd &= ~MI_NO_WRITE_FLUSH;
if (INTEL_INFO(dev)->gen < 4) {
/*
* On the 965, the sampler cache always gets flushed
* and this bit is reserved.
*/
if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
cmd |= MI_READ_FLUSH;
}
if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
cmd |= MI_EXE_FLUSH;
 
218,30 → 220,121
int ret;
 
/* Force SNB workarounds for PIPE_CONTROL flushes */
intel_emit_post_sync_nonzero_flush(ring);
ret = intel_emit_post_sync_nonzero_flush(ring);
if (ret)
return ret;
 
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
*/
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
/*
* Ensure that any following seqno writes only happen
* when the render cache is indeed flushed.
*/
flags |= PIPE_CONTROL_CS_STALL;
}
if (invalidate_domains) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
}
 
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
 
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
 
return 0;
}
 
static int
gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
{
int ret;
 
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
 
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_STALL_AT_SCOREBOARD);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
 
return 0;
}
 
static int
gen7_render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains, u32 flush_domains)
{
u32 flags = 0;
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
 
/*
* Ensure that any following seqno writes only happen when the render
* cache is indeed flushed.
*
* Workaround: 4th PIPE_CONTROL command (except the ones with only
* read-cache invalidate bits set) must have the CS_STALL bit set. We
* don't try to be clever and just set it unconditionally.
*/
flags |= PIPE_CONTROL_CS_STALL;
 
/* Just flush everything. Experiments have shown that reducing the
* number of bits based on the write domains has little performance
* impact.
*/
if (flush_domains) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
}
if (invalidate_domains) {
flags |= PIPE_CONTROL_TLB_INVALIDATE;
flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
 
ret = intel_ring_begin(ring, 6);
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
gen7_render_ring_cs_stall_wa(ring);
}
 
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
 
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, 0); /* lower dword */
intel_ring_emit(ring, 0); /* upper dword */
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
 
return 0;
265,17 → 358,20
 
static int init_ring_common(struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = ring->obj;
int ret = 0;
u32 head;
 
if (HAS_FORCE_WAKE(dev))
gen6_gt_force_wake_get(dev_priv);
 
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
ring->write_tail(ring, 0);
 
/* Initialize the ring. */
I915_WRITE_START(ring, obj->gtt_offset);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
/* G45 ring initialization fails to reset head to zero */
301,14 → 397,19
}
}
 
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
I915_WRITE_START(ring, obj->gtt_offset);
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_REPORT_64K | RING_VALID);
| RING_VALID);
 
/* If the head is still not zero, the ring is dead */
if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
I915_READ_START(ring) != obj->gtt_offset ||
(I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
I915_READ_START(ring) == obj->gtt_offset &&
(I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
316,15 → 417,20
I915_READ_HEAD(ring),
I915_READ_TAIL(ring),
I915_READ_START(ring));
return -EIO;
ret = -EIO;
goto out;
}
 
ring->head = I915_READ_HEAD(ring);
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring_space(ring);
ring->last_retired_head = -1;
 
out:
if (HAS_FORCE_WAKE(dev))
gen6_gt_force_wake_put(dev_priv);
 
return 0;
return ret;
}
 
static int
350,12 → 456,12
 
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
ret = i915_gem_object_pin(obj, 4096, true);
ret = i915_gem_object_pin(obj, 4096, true, false);
if (ret)
goto err_unref;
 
pc->gtt_offset = obj->gtt_offset;
pc->cpu_page = (void*)MapIoMem((addr_t)obj->pages[0], 4096, PG_SW);
pc->cpu_page = (void*)MapIoMem((addr_t)obj->pages.page[0], 4096, PG_SW);
if (pc->cpu_page == NULL)
goto err_unpin;
 
397,14 → 503,11
int ret = init_ring_common(ring);
 
if (INTEL_INFO(dev)->gen > 3) {
int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
if (IS_GEN6(dev) || IS_GEN7(dev))
mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
I915_WRITE(MI_MODE, mode);
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
if (IS_GEN7(dev))
I915_WRITE(GFX_MODE_GEN7,
GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
GFX_MODE_ENABLE(GFX_REPLAY_MODE));
_MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
}
 
if (INTEL_INFO(dev)->gen >= 5) {
413,11 → 516,29
return ret;
}
 
if (INTEL_INFO(dev)->gen >= 6) {
I915_WRITE(INSTPM,
INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
if (IS_GEN6(dev)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
* policy. [...] This bit must be reset. LRA replacement
* policy is not supported."
*/
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
 
/* This is not explicitly set for GEN6, so read the register.
* see intel_ring_mi_set_context() for why we care.
* TODO: consider explicitly setting the bit for GEN5
*/
ring->itlb_before_ctx_switch =
!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
}
 
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
if (HAS_L3_GPU_CACHE(dev))
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
 
return ret;
}
 
466,7 → 587,7
mbox1_reg = ring->signal_mbox[0];
mbox2_reg = ring->signal_mbox[1];
 
*seqno = i915_gem_get_seqno(ring->dev);
*seqno = i915_gem_next_request_seqno(ring);
 
update_mboxes(ring, *seqno, mbox1_reg);
update_mboxes(ring, *seqno, mbox2_reg);
487,9 → 608,8
* @seqno - seqno which the waiter will block on
*/
static int
intel_ring_sync(struct intel_ring_buffer *waiter,
gen6_ring_sync(struct intel_ring_buffer *waiter,
struct intel_ring_buffer *signaller,
int ring,
u32 seqno)
{
int ret;
497,11 → 617,21
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
 
/* Throughout all of the GEM code, seqno passed implies our current
* seqno is >= the last seqno executed. However for hardware the
* comparison is strictly greater than.
*/
seqno -= 1;
 
WARN_ON(signaller->semaphore_register[waiter->id] ==
MI_SEMAPHORE_SYNC_INVALID);
 
ret = intel_ring_begin(waiter, 4);
if (ret)
return ret;
 
intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
intel_ring_emit(waiter,
dw1 | signaller->semaphore_register[waiter->id]);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter, 0);
intel_ring_emit(waiter, MI_NOOP);
510,47 → 640,6
return 0;
}
 
/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
int
render_ring_sync_to(struct intel_ring_buffer *waiter,
struct intel_ring_buffer *signaller,
u32 seqno)
{
WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
return intel_ring_sync(waiter,
signaller,
RCS,
seqno);
}
 
/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
int
gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
struct intel_ring_buffer *signaller,
u32 seqno)
{
WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
return intel_ring_sync(waiter,
signaller,
VCS,
seqno);
}
 
/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
int
gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
struct intel_ring_buffer *signaller,
u32 seqno)
{
WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
return intel_ring_sync(waiter,
signaller,
BCS,
seqno);
}
 
 
 
#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
564,8 → 653,7
pc_render_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
struct drm_device *dev = ring->dev;
u32 seqno = i915_gem_get_seqno(dev);
u32 seqno = i915_gem_next_request_seqno(ring);
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
599,6 → 687,7
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128;
PIPE_CONTROL_FLUSH(ring, scratch_addr);
 
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
612,124 → 701,139
return 0;
}
 
static int
render_ring_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
struct drm_device *dev = ring->dev;
u32 seqno = i915_gem_get_seqno(dev);
int ret;
 
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
 
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
 
*result = seqno;
return 0;
}
 
static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
struct drm_device *dev = ring->dev;
 
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page. */
if (IS_GEN7(dev))
if (!lazy_coherency)
intel_ring_get_active_head(ring);
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
 
static u32
ring_get_seqno(struct intel_ring_buffer *ring)
ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
 
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
struct pipe_control *pc = ring->private;
return pc->cpu_page[0];
}
 
static void
ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
dev_priv->gt_irq_mask &= ~mask;
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
return false;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
return true;
}
 
static void
ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
dev_priv->gt_irq_mask |= mask;
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
dev_priv->gt_irq_mask |= ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
static void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
dev_priv->irq_mask &= ~mask;
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
return false;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
dev_priv->irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
return true;
}
 
static void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
dev_priv->irq_mask |= mask;
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
dev_priv->irq_mask |= ring->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
static bool
render_ring_get_irq(struct intel_ring_buffer *ring)
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
return false;
 
spin_lock(&ring->irq_lock);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
if (HAS_PCH_SPLIT(dev))
ironlake_enable_irq(dev_priv,
GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
dev_priv->irq_mask &= ~ring->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
}
spin_unlock(&ring->irq_lock);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
return true;
}
 
static void
render_ring_put_irq(struct intel_ring_buffer *ring)
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long flags;
 
spin_lock(&ring->irq_lock);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
if (HAS_PCH_SPLIT(dev))
ironlake_disable_irq(dev_priv,
GT_USER_INTERRUPT |
GT_PIPE_NOTIFY);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
dev_priv->irq_mask |= ring->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
}
spin_unlock(&ring->irq_lock);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
 
void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
743,13 → 847,13
*/
if (IS_GEN7(dev)) {
switch (ring->id) {
case RING_RENDER:
case RCS:
mmio = RENDER_HWS_PGA_GEN7;
break;
case RING_BLT:
case BCS:
mmio = BLT_HWS_PGA_GEN7;
break;
case RING_BSD:
case VCS:
mmio = BSD_HWS_PGA_GEN7;
break;
}
781,7 → 885,7
}
 
static int
ring_add_request(struct intel_ring_buffer *ring,
i9xx_add_request(struct intel_ring_buffer *ring,
u32 *result)
{
u32 seqno;
791,7 → 895,7
if (ret)
return ret;
 
seqno = i915_gem_get_seqno(ring->dev);
seqno = i915_gem_next_request_seqno(ring);
 
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
804,10 → 908,11
}
 
static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long flags;
 
if (!dev->irq_enabled)
return false;
815,76 → 920,48
/* It looks like we need to prevent the gt from suspending while waiting
* for a notify irq, otherwise irqs seem to get lost on at least the
* blt/bsd rings on ivb. */
if (IS_GEN7(dev))
gen6_gt_force_wake_get(dev_priv);
 
spin_lock(&ring->irq_lock);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (ring->irq_refcount++ == 0) {
ring->irq_mask &= ~rflag;
I915_WRITE_IMR(ring, ring->irq_mask);
ironlake_enable_irq(dev_priv, gflag);
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
GEN6_RENDER_L3_PARITY_ERROR));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
spin_unlock(&ring->irq_lock);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
return true;
}
 
static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long flags;
 
spin_lock(&ring->irq_lock);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
if (--ring->irq_refcount == 0) {
ring->irq_mask |= rflag;
I915_WRITE_IMR(ring, ring->irq_mask);
ironlake_disable_irq(dev_priv, gflag);
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
else
I915_WRITE_IMR(ring, ~0);
dev_priv->gt_irq_mask |= ring->irq_enable_mask;
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
POSTING_READ(GTIMR);
}
spin_unlock(&ring->irq_lock);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
if (IS_GEN7(dev))
gen6_gt_force_wake_put(dev_priv);
}
 
static bool
bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
 
if (!dev->irq_enabled)
return false;
 
spin_lock(&ring->irq_lock);
if (ring->irq_refcount++ == 0) {
if (IS_G4X(dev))
i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
else
ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
}
spin_unlock(&ring->irq_lock);
 
return true;
}
static void
bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
 
spin_lock(&ring->irq_lock);
if (--ring->irq_refcount == 0) {
if (IS_G4X(dev))
i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
else
ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
}
spin_unlock(&ring->irq_lock);
}
 
static int
ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
int ret;
 
893,7 → 970,8
return ret;
 
intel_ring_emit(ring,
MI_BATCH_BUFFER_START | (2 << 6) |
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
MI_BATCH_NON_SECURE_I965);
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
902,13 → 980,11
}
 
static int
render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
{
struct drm_device *dev = ring->dev;
int ret;
 
if (IS_I830(dev) || IS_845G(dev)) {
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
917,22 → 993,23
intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
intel_ring_emit(ring, offset + len - 8);
intel_ring_emit(ring, 0);
} else {
intel_ring_advance(ring);
 
return 0;
}
 
static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
{
int ret;
 
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
 
if (INTEL_INFO(dev)->gen >= 4) {
intel_ring_emit(ring,
MI_BATCH_BUFFER_START | (2 << 6) |
MI_BATCH_NON_SECURE_I965);
intel_ring_emit(ring, offset);
} else {
intel_ring_emit(ring,
MI_BATCH_BUFFER_START | (2 << 6));
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
}
}
intel_ring_advance(ring);
 
return 0;
940,7 → 1017,6
 
static void cleanup_status_page(struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj;
 
obj = ring->status_page.obj;
947,18 → 1023,15
if (obj == NULL)
return;
 
kunmap(obj->pages[0]);
// kunmap(sg_page(obj->pages->sgl));
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
 
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}
 
static int init_status_page(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret;
 
971,15 → 1044,15
 
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
ret = i915_gem_object_pin(obj, 4096, true);
ret = i915_gem_object_pin(obj, 4096, true, false);
if (ret != 0) {
goto err_unref;
}
 
ring->status_page.gfx_addr = obj->gtt_offset;
ring->status_page.page_addr = (void*)MapIoMem((addr_t)obj->pages[0], 4096, PG_SW);
ring->status_page.page_addr = (void*)MapIoMem(obj->pages.page[0],4096,PG_SW);
if (ring->status_page.page_addr == NULL) {
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
ret = -ENOMEM;
goto err_unpin;
}
ring->status_page.obj = obj;
999,20 → 1072,19
return ret;
}
 
int intel_init_ring_buffer(struct drm_device *dev,
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
INIT_LIST_HEAD(&ring->gpu_write_list);
ring->size = 32 * PAGE_SIZE;
 
init_waitqueue_head(&ring->irq_queue);
spin_lock_init(&ring->irq_lock);
ring->irq_mask = ~0;
 
if (I915_NEED_GFX_HWS(dev)) {
ret = init_status_page(ring);
1029,27 → 1101,23
 
ring->obj = obj;
 
ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
if (ret)
goto err_unref;
 
ring->map.size = ring->size;
ring->map.offset = get_bus_addr() + obj->gtt_offset;
ring->map.type = 0;
ring->map.flags = 0;
ring->map.mtrr = 0;
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
goto err_unpin;
 
// drm_core_ioremap_wc(&ring->map, dev);
 
ring->map.handle = ioremap(ring->map.offset, ring->map.size);
 
if (ring->map.handle == NULL) {
ring->virtual_start =
ioremap(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
ret = -EINVAL;
goto err_unpin;
}
 
ring->virtual_start = ring->map.handle;
ret = ring->init(ring);
if (ret)
goto err_unmap;
1059,7 → 1127,7
* of the buffer.
*/
ring->effective_size = ring->size;
if (IS_I830(ring->dev))
if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
 
return 0;
1107,7 → 1175,7
 
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
unsigned int *virt;
uint32_t __iomem *virt;
int rem = ring->size - ring->tail;
 
ENTER();
1118,12 → 1186,10
return ret;
}
 
virt = (unsigned int *)(ring->virtual_start + ring->tail);
rem /= 8;
while (rem--) {
*virt++ = MI_NOOP;
*virt++ = MI_NOOP;
}
virt = ring->virtual_start + ring->tail;
rem /= 4;
while (rem--)
iowrite32(MI_NOOP, virt++);
 
ring->tail = 0;
ring->space = ring_space(ring);
1132,26 → 1198,93
return 0;
}
 
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
u32 head;
int ret;
 
/* If the reported head position has wrapped or hasn't advanced,
* fall back to the slow and accurate path.
*/
head = intel_read_status_page(ring, 4);
if (head > ring->head) {
ring->head = head;
ret = i915_wait_seqno(ring, seqno);
if (!ret)
i915_gem_retire_requests_ring(ring);
 
return ret;
}
 
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
struct drm_i915_gem_request *request;
u32 seqno = 0;
int ret;
 
i915_gem_retire_requests_ring(ring);
 
if (ring->last_retired_head != -1) {
ring->head = ring->last_retired_head;
ring->last_retired_head = -1;
ring->space = ring_space(ring);
if (ring->space >= n)
return 0;
}
 
list_for_each_entry(request, &ring->request_list, list) {
int space;
 
end = jiffies + 3 * HZ;
if (request->tail == -1)
continue;
 
space = request->tail - (ring->tail + 8);
if (space < 0)
space += ring->size;
if (space >= n) {
seqno = request->seqno;
break;
}
 
/* Consume this request in case we need more space than
* is available and so need to prevent a race between
* updating last_retired_head and direct reads of
* I915_RING_HEAD. It also provides a nice sanity check.
*/
request->tail = -1;
}
 
if (seqno == 0)
return -ENOSPC;
 
ret = intel_ring_wait_seqno(ring, seqno);
if (ret)
return ret;
 
if (WARN_ON(ring->last_retired_head == -1))
return -ENOSPC;
 
ring->head = ring->last_retired_head;
ring->last_retired_head = -1;
ring->space = ring_space(ring);
if (WARN_ON(ring->space < n))
return -ENOSPC;
 
return 0;
}
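 
intel_ring_wait_request() reuses the same circular-buffer arithmetic as ring_space(): distance from consumer to producer, modulo the ring size, minus a small headroom so the tail never catches the head exactly. A standalone sketch with illustrative names:
 
#include <stdint.h>
 
/* Free bytes between 'tail' (producer) and 'head' (consumer) in a ring
 * of 'size' bytes; the 8-byte headroom mirrors the convention above. */
static int ring_free_space(uint32_t head, uint32_t tail, uint32_t size)
{
	int space = head - (tail + 8);
	if (space < 0)
		space += size;
	return space;
}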
 
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
int ret;
 
ret = intel_ring_wait_request(ring, n);
if (ret != -ENOSPC)
return ret;
 
 
/* With GEM the hangcheck timer should kick us out of the loop;
* leaving it early runs the risk of corrupting GEM state (due
* to running on almost untested codepaths). But on resume
* timers don't work yet, so prevent a complete hang in that
* case by choosing an insanely large timeout. */
end = GetTimerTicks() + 60 * HZ;
 
do {
ring->head = I915_READ_HEAD(ring);
ring->space = ring_space(ring);
1161,9 → 1294,11
}
 
msleep(1);
if (atomic_read(&dev_priv->mm.wedged))
return -EAGAIN;
} while (!time_after(jiffies, end));
 
ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
if (ret)
return ret;
} while (!time_after(GetTimerTicks(), end));
trace_i915_ring_wait_end(ring);
return -EBUSY;
}
1171,12 → 1306,13
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
drm_i915_private_t *dev_priv = ring->dev->dev_private;
int n = 4*num_dwords;
int ret;
 
if (unlikely(atomic_read(&dev_priv->mm.wedged)))
return -EIO;
ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
if (ret)
return ret;
 
if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
1196,49 → 1332,15
 
void intel_ring_advance(struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
ring->tail &= ring->size - 1;
if (dev_priv->stop_rings & intel_ring_flag(ring))
return;
ring->write_tail(ring, ring->tail);
}
 
static const struct intel_ring_buffer render_ring = {
.name = "render ring",
.id = RING_RENDER,
.mmio_base = RENDER_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_render_ring,
.write_tail = ring_write_tail,
.flush = render_ring_flush,
.add_request = render_ring_add_request,
.get_seqno = ring_get_seqno,
.irq_get = render_ring_get_irq,
.irq_put = render_ring_put_irq,
.dispatch_execbuffer = render_ring_dispatch_execbuffer,
// .cleanup = render_ring_cleanup,
.sync_to = render_ring_sync_to,
.semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
MI_SEMAPHORE_SYNC_RV,
MI_SEMAPHORE_SYNC_RB},
.signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
};
 
/* ring buffer for bit-stream decoder */
 
static const struct intel_ring_buffer bsd_ring = {
.name = "bsd ring",
.id = RING_BSD,
.mmio_base = BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_ring_common,
.write_tail = ring_write_tail,
.flush = bsd_ring_flush,
.add_request = ring_add_request,
.get_seqno = ring_get_seqno,
.irq_get = bsd_ring_get_irq,
.irq_put = bsd_ring_put_irq,
.dispatch_execbuffer = ring_dispatch_execbuffer,
};
 
 
static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
1245,20 → 1347,31
drm_i915_private_t *dev_priv = ring->dev->dev_private;
 
/* Every tail move must follow the sequence below */
 
/* Disable notification that the ring is IDLE. The GT
* will then assume that it is busy and bring it out of rc6.
*/
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
I915_WRITE(GEN6_BSD_RNCID, 0x0);
_MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 
/* Clear the context id. Here be magic! */
I915_WRITE64(GEN6_BSD_RNCID, 0x0);
 
/* Wait for the ring not to be idle, i.e. for it to wake up. */
if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
GEN6_BSD_SLEEP_INDICATOR) == 0,
50))
DRM_ERROR("timed out waiting for IDLE Indicator\n");
DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
/* Now that the ring is fully powered up, update the tail */
I915_WRITE_TAIL(ring, value);
POSTING_READ(RING_TAIL(ring->mmio_base));
 
/* Let the ring send IDLE messages to the GT again,
* and so let it sleep to conserve power when idle.
*/
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
_MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
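 
The _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE writes above target self-masked registers, where the upper 16 bits of the written value select which of the lower 16 bits actually change. A sketch of the idiom, assuming the usual i915 encoding:
 
#include <stdint.h>
 
/* Masked registers: bits 31:16 are a write-enable mask for bits 15:0,
 * so a single write can set or clear bits without a read-modify-write. */
#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit(s) a */
#define MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit(s) a */
 
/* What the hardware effectively does with such a write. */
static uint32_t apply_masked_write(uint32_t reg, uint32_t val)
{
	uint32_t mask = val >> 16;
 
	return (reg & ~mask) | (val & mask);
}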
 
static int gen6_ring_flush(struct intel_ring_buffer *ring,
1300,148 → 1413,8
return 0;
}
 
static bool
gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
{
return gen6_ring_get_irq(ring,
GT_USER_INTERRUPT,
GEN6_RENDER_USER_INTERRUPT);
}
 
static void
gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
{
return gen6_ring_put_irq(ring,
GT_USER_INTERRUPT,
GEN6_RENDER_USER_INTERRUPT);
}
 
static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
return gen6_ring_get_irq(ring,
GT_GEN6_BSD_USER_INTERRUPT,
GEN6_BSD_USER_INTERRUPT);
}
 
static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
return gen6_ring_put_irq(ring,
GT_GEN6_BSD_USER_INTERRUPT,
GEN6_BSD_USER_INTERRUPT);
}
 
/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
.name = "gen6 bsd ring",
.id = RING_BSD,
.mmio_base = GEN6_BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = init_ring_common,
.write_tail = gen6_bsd_ring_write_tail,
.flush = gen6_ring_flush,
.add_request = gen6_add_request,
.get_seqno = gen6_ring_get_seqno,
.irq_get = gen6_bsd_ring_get_irq,
.irq_put = gen6_bsd_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
.sync_to = gen6_bsd_ring_sync_to,
.semaphore_register = {MI_SEMAPHORE_SYNC_VR,
MI_SEMAPHORE_SYNC_INVALID,
MI_SEMAPHORE_SYNC_VB},
.signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
};
 
/* Blitter support (SandyBridge+) */
 
static bool
blt_ring_get_irq(struct intel_ring_buffer *ring)
{
return gen6_ring_get_irq(ring,
GT_BLT_USER_INTERRUPT,
GEN6_BLITTER_USER_INTERRUPT);
}
 
static void
blt_ring_put_irq(struct intel_ring_buffer *ring)
{
gen6_ring_put_irq(ring,
GT_BLT_USER_INTERRUPT,
GEN6_BLITTER_USER_INTERRUPT);
}
 
 
/* Workaround for some steppings of SNB:
 * each time the BLT engine ring tail is moved,
 * the first command in the ring to be parsed
 * should be MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
(IS_GEN6(dev) && (dev->pdev->revision < 8))
 
static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
return ring->private;
}
 
static int blt_ring_init(struct intel_ring_buffer *ring)
{
if (NEED_BLT_WORKAROUND(ring->dev)) {
struct drm_i915_gem_object *obj;
u32 *ptr;
int ret;
 
obj = i915_gem_alloc_object(ring->dev, 4096);
if (obj == NULL)
return -ENOMEM;
 
ret = i915_gem_object_pin(obj, 4096, true);
if (ret) {
drm_gem_object_unreference(&obj->base);
return ret;
}
 
ptr = (void*)MapIoMem((addr_t)obj->pages[0], 4096, PG_SW);
obj->mapped = ptr;
 
*ptr++ = MI_BATCH_BUFFER_END;
*ptr++ = MI_NOOP;
 
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret) {
i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
FreeKernelSpace(ptr);
obj->mapped = NULL;
return ret;
}
FreeKernelSpace(ptr);
obj->mapped = NULL;
 
ring->private = obj;
}
 
return init_ring_common(ring);
}
 
static int blt_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
if (ring->private) {
int ret = intel_ring_begin(ring, num_dwords+2);
if (ret)
return ret;
 
intel_ring_emit(ring, MI_BATCH_BUFFER_START);
intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
 
return 0;
} else
return intel_ring_begin(ring, 4);
}
 
static int blt_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate, u32 flush)
{
1448,7 → 1421,7
uint32_t cmd;
int ret;
 
ret = blt_ring_begin(ring, 4);
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
 
1463,54 → 1436,66
return 0;
}
 
static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
if (!ring->private)
return;
 
i915_gem_object_unpin(ring->private);
drm_gem_object_unreference(ring->private);
ring->private = NULL;
}
 
static const struct intel_ring_buffer gen6_blt_ring = {
.name = "blt ring",
.id = RING_BLT,
.mmio_base = BLT_RING_BASE,
.size = 32 * PAGE_SIZE,
.init = blt_ring_init,
.write_tail = ring_write_tail,
.flush = blt_ring_flush,
.add_request = gen6_add_request,
.get_seqno = gen6_ring_get_seqno,
.irq_get = blt_ring_get_irq,
.irq_put = blt_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
// .cleanup = blt_ring_cleanup,
.sync_to = gen6_blt_ring_sync_to,
.semaphore_register = {MI_SEMAPHORE_SYNC_BR,
MI_SEMAPHORE_SYNC_BV,
MI_SEMAPHORE_SYNC_INVALID},
.signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
};
 
int intel_init_render_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 
*ring = render_ring;
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
 
if (INTEL_INFO(dev)->gen >= 6) {
ring->add_request = gen6_add_request;
ring->flush = gen7_render_ring_flush;
if (INTEL_INFO(dev)->gen == 6)
ring->flush = gen6_render_ring_flush;
ring->irq_get = gen6_render_ring_get_irq;
ring->irq_put = gen6_render_ring_put_irq;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
ring->signal_mbox[0] = GEN6_VRSYNC;
ring->signal_mbox[1] = GEN6_BRSYNC;
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
ring->get_seqno = pc_render_get_seqno;
ring->irq_get = gen5_ring_get_irq;
ring->irq_put = gen5_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
} else {
ring->add_request = i9xx_add_request;
if (INTEL_INFO(dev)->gen < 4)
ring->flush = gen2_render_ring_flush;
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
} else {
ring->irq_get = i9xx_ring_get_irq;
ring->irq_put = i9xx_ring_put_irq;
}
ring->irq_enable_mask = I915_USER_INTERRUPT;
}
ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6)
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
else if (IS_I830(dev) || IS_845G(dev))
ring->dispatch_execbuffer = i830_dispatch_execbuffer;
else
ring->dispatch_execbuffer = i915_dispatch_execbuffer;
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
 
 
if (!I915_NEED_GFX_HWS(dev)) {
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1525,11 → 1510,47
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
 
if (IS_GEN6(dev) || IS_GEN7(dev))
*ring = gen6_bsd_ring;
else
*ring = bsd_ring;
ring->name = "bsd ring";
ring->id = VCS;
 
ring->write_tail = ring_write_tail;
if (IS_GEN6(dev) || IS_GEN7(dev)) {
ring->mmio_base = GEN6_BSD_RING_BASE;
/* gen6 bsd needs a special workaround for tail updates */
if (IS_GEN6(dev))
ring->write_tail = gen6_bsd_ring_write_tail;
ring->flush = gen6_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
ring->signal_mbox[0] = GEN6_RVSYNC;
ring->signal_mbox[1] = GEN6_BVSYNC;
} else {
ring->mmio_base = BSD_RING_BASE;
ring->flush = bsd_ring_flush;
ring->add_request = i9xx_add_request;
ring->get_seqno = ring_get_seqno;
if (IS_GEN5(dev)) {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen5_ring_get_irq;
ring->irq_put = gen5_ring_put_irq;
} else {
ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
ring->irq_get = i9xx_ring_get_irq;
ring->irq_put = i9xx_ring_put_irq;
}
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
}
ring->init = init_ring_common;
 
 
return intel_init_ring_buffer(dev, ring);
}
 
1538,7 → 1559,63
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
 
*ring = gen6_blt_ring;
ring->name = "blitter ring";
ring->id = BCS;
 
ring->mmio_base = BLT_RING_BASE;
ring->write_tail = ring_write_tail;
ring->flush = blt_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
ring->signal_mbox[0] = GEN6_RBSYNC;
ring->signal_mbox[1] = GEN6_VBSYNC;
ring->init = init_ring_common;
 
return intel_init_ring_buffer(dev, ring);
}
 
int
intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
{
int ret;
 
if (!ring->gpu_caches_dirty)
return 0;
 
ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
if (ret)
return ret;
 
trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
 
ring->gpu_caches_dirty = false;
return 0;
}
 
int
intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
{
uint32_t flush_domains;
int ret;
 
flush_domains = 0;
if (ring->gpu_caches_dirty)
flush_domains = I915_GEM_GPU_DOMAINS;
 
ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
if (ret)
return ret;
 
trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
 
ring->gpu_caches_dirty = false;
return 0;
}
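 
The two helpers above implement lazy cache maintenance keyed off gpu_caches_dirty. A hypothetical call site showing the intended invalidate-before / flush-after pattern around a batch (submit_batch is illustrative, not part of the driver):
 
/* Invalidate read caches before the batch consumes new data, then mark
 * and flush after the batch may have written through GPU caches. */
static int submit_batch(struct intel_ring_buffer *ring,
			u32 offset, u32 len)
{
	int ret;
 
	ret = intel_ring_invalidate_all_caches(ring);
	if (ret)
		return ret;
 
	ret = ring->dispatch_execbuffer(ring, offset, len);
	if (ret)
		return ret;
 
	ring->gpu_caches_dirty = true;
	return intel_ring_flush_all_caches(ring);
}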
/drivers/video/drm/i915/intel_ringbuffer.h
1,15 → 1,8
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
 
enum {
RCS = 0x0,
VCS,
BCS,
I915_NUM_RINGS,
};
 
struct intel_hw_status_page {
u32 __iomem *page_addr;
u32 *page_addr;
unsigned int gfx_addr;
struct drm_i915_gem_object *obj;
};
36,10 → 29,11
struct intel_ring_buffer {
const char *name;
enum intel_ring_id {
RING_RENDER = 0x1,
RING_BSD = 0x2,
RING_BLT = 0x4,
RCS = 0x0,
VCS,
BCS,
} id;
#define I915_NUM_RINGS 3
u32 mmio_base;
void __iomem *virtual_start;
struct drm_device *dev;
52,12 → 46,19
int effective_size;
struct intel_hw_status_page status_page;
 
spinlock_t irq_lock;
u32 irq_refcount;
u32 irq_mask;
u32 irq_seqno; /* last seq seem at irq time */
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
* must have finished processing the request and so we know we
* can advance the ringbuffer up to that position.
*
* last_retired_head is set to -1 after the value is consumed so
* we can detect new retirements.
*/
u32 last_retired_head;
 
u32 irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
u32 waiting_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
void (*irq_put)(struct intel_ring_buffer *ring);
71,7 → 72,14
u32 flush_domains);
int (*add_request)(struct intel_ring_buffer *ring,
u32 *seqno);
u32 (*get_seqno)(struct intel_ring_buffer *ring);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
* seen value is good enough. Note that the seqno will always be
* monotonic, even if not coherent.
*/
u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
100,25 → 108,35
struct list_head request_list;
 
/**
* List of objects currently pending a GPU write flush.
*
* All elements on this list will belong to either the
* active_list or flushing_list, last_rendering_seqno can
* be used to differentiate between the two elements.
*/
struct list_head gpu_write_list;
 
/**
* Do we have some not yet emitted requests outstanding?
*/
u32 outstanding_lazy_request;
bool gpu_caches_dirty;
 
wait_queue_head_t irq_queue;
drm_local_map_t map;
 
/**
* Do an explicit TLB flush before MI_SET_CONTEXT
*/
bool itlb_before_ctx_switch;
struct i915_hw_context *default_context;
struct drm_i915_gem_object *last_context_obj;
 
void *private;
};
 
static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
return ring->obj != NULL;
}
 
static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
return 1 << ring->id;
}
 
static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
struct intel_ring_buffer *other)
142,7 → 160,9
intel_read_status_page(struct intel_ring_buffer *ring,
int reg)
{
return ioread32(ring->status_page.page_addr + reg);
/* Ensure that the compiler doesn't optimize away the load. */
barrier();
return ring->status_page.page_addr[reg];
}
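 
The barrier() above is a compiler-only fence: with the status page now mapped as ordinary snooped memory, no CPU memory barrier is needed, only a guarantee that the load is re-issued on every call instead of being cached in a register. A sketch of the idea with an illustrative helper:
 
#include <stdint.h>
 
#define compiler_barrier() __asm__ __volatile__("" ::: "memory")
 
/* Without the barrier, a polling loop around this helper could be
 * optimized into spinning on a stale, register-cached value forever. */
static uint32_t read_hws_dword(const uint32_t *status_page, int reg)
{
	/* An equivalent formulation is a volatile load:
	 *   return *(volatile const uint32_t *)&status_page[reg]; */
	compiler_barrier();
	return status_page[reg];
}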
 
/**
160,10 → 180,7
*
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
 
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 
185,6 → 202,8
void intel_ring_advance(struct intel_ring_buffer *ring);
 
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
193,6 → 212,11
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
 
static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
return ring->tail;
}
 
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
/drivers/video/drm/i915/intel_sdvo.c
28,12 → 28,12
#include <linux/i2c.h>
#include <linux/slab.h>
//#include <linux/delay.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_edid.h"
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
 
49,7 → 49,7
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_YPRPB0)
 
#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
SDVO_TV_MASK)
82,7 → 82,7
struct i2c_adapter ddc;
 
/* Register for the SDVO device: SDVOB or SDVOC */
int sdvo_reg;
uint32_t sdvo_reg;
 
/* Active outputs controlled by this SDVO output */
uint16_t controlled_output;
105,7 → 105,7
/*
* Hotplug activation bits for this device
*/
uint8_t hotplug_active[2];
uint16_t hotplug_active;
 
/**
* This is used to select the color range of RBG outputs in HDMI mode.
122,6 → 122,9
*/
bool is_tv;
 
/* On different gens SDVOB is at different places. */
bool is_sdvob;
 
/* This is for current tv format name */
int tv_format_index;
 
146,8 → 149,10
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
 
/* Input timings for adjusted_mode */
struct intel_sdvo_dtd input_dtd;
/*
* the sdvo flag gets lost in round trip: dtd->adjusted_mode->dtd
*/
uint8_t dtd_sdvo_flags;
};
 
struct intel_sdvo_connector {
156,7 → 161,7
/* Mark the type of connector */
uint16_t output_flag;
 
int force_audio;
enum hdmi_force_audio force_audio;
 
/* This contains all current supported TV format */
u8 tv_format_supported[TV_FORMAT_NUM];
411,8 → 416,7
SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
};
 
#define IS_SDVOB(reg) (reg == SDVOB || reg == PCH_SDVOB)
#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
#define SDVO_NAME(svdo) ((svdo)->is_sdvob ? "SDVOB" : "SDVOC")
 
static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
449,10 → 453,21
static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
const void *args, int args_len)
{
u8 buf[args_len*2 + 2], status;
struct i2c_msg msgs[args_len + 3];
int i, ret;
u8 *buf, status;
struct i2c_msg *msgs;
int i, ret = true;
 
/* Would be simpler to allocate both in one go? */
buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL);
if (!buf)
return false;
 
msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
if (!msgs) {
kfree(buf);
return false;
}
 
intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
 
for (i = 0; i < args_len; i++) {
485,15 → 500,19
ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
if (ret < 0) {
DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
return false;
ret = false;
goto out;
}
if (ret != i+3) {
/* failure in I2C transfer */
DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
return false;
ret = false;
}
 
return true;
out:
kfree(msgs);
kfree(buf);
return ret;
}
 
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
622,6 → 641,14
&outputs, sizeof(outputs));
}
 
static bool intel_sdvo_get_active_outputs(struct intel_sdvo *intel_sdvo,
u16 *outputs)
{
return intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_ACTIVE_OUTPUTS,
outputs, sizeof(*outputs));
}
 
static bool intel_sdvo_set_encoder_power_state(struct intel_sdvo *intel_sdvo,
int mode)
{
739,21 → 766,26
uint16_t width, height;
uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
uint16_t h_sync_offset, v_sync_offset;
int mode_clock;
 
width = mode->crtc_hdisplay;
height = mode->crtc_vdisplay;
width = mode->hdisplay;
height = mode->vdisplay;
 
/* do some mode translations */
h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
h_blank_len = mode->htotal - mode->hdisplay;
h_sync_len = mode->hsync_end - mode->hsync_start;
 
v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
v_blank_len = mode->vtotal - mode->vdisplay;
v_sync_len = mode->vsync_end - mode->vsync_start;
 
h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
h_sync_offset = mode->hsync_start - mode->hdisplay;
v_sync_offset = mode->vsync_start - mode->vdisplay;
 
dtd->part1.clock = mode->clock / 10;
mode_clock = mode->clock;
mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1;
mode_clock /= 10;
dtd->part1.clock = mode_clock;
 
dtd->part1.h_active = width & 0xff;
dtd->part1.h_blank = h_blank_len & 0xff;
dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
772,10 → 804,12
((v_sync_len & 0x30) >> 4);
 
dtd->part2.dtd_flags = 0x18;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
dtd->part2.dtd_flags |= 0x2;
dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
dtd->part2.dtd_flags |= 0x4;
dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
 
dtd->part2.sdvo_flags = 0;
dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
809,9 → 843,11
mode->clock = dtd->part1.clock * 10;
 
mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
if (dtd->part2.dtd_flags & 0x2)
if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
mode->flags |= DRM_MODE_FLAG_INTERLACE;
if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PHSYNC;
if (dtd->part2.dtd_flags & 0x4)
if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
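 
The DTD conversions above pack each 12-bit timing value as a low byte plus a high nibble, two nibbles sharing one byte. A worked sketch for the horizontal pair, with illustrative numbers for a 1024x768 mode (htotal of 1344 assumed, so h_blank = 320):
 
#include <stdint.h>
 
/* Pack active width and blank length the way part1.h_active/h_blank/
 * h_high do. For width = 1024 (0x400) and h_blank = 320 (0x140):
 * h_active = 0x00, blank = 0x40, high = 0x41. */
static void pack_h_pair(uint16_t width, uint16_t h_blank,
			uint8_t *h_active, uint8_t *blank, uint8_t *high)
{
	*h_active = width & 0xff;
	*blank = h_blank & 0xff;
	*high = (((width >> 8) & 0xf) << 4) |
		((h_blank >> 8) & 0xf);
}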
 
867,31 → 903,38
}
#endif
 
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
unsigned if_index, uint8_t tx_rate,
uint8_t *data, unsigned length)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
uint8_t set_buf_index[2] = { 1, 0 };
uint64_t *data = (uint64_t *)&avi_if;
unsigned i;
uint8_t set_buf_index[2] = { if_index, 0 };
uint8_t hbuf_size, tmp[8];
int i;
 
intel_dip_infoframe_csum(&avi_if);
 
if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_INDEX,
set_buf_index, 2))
return false;
 
for (i = 0; i < sizeof(avi_if); i += 8) {
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
&hbuf_size, 1))
return false;
 
/* Buffer size is 0 based, hooray! */
hbuf_size++;
 
DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
if_index, length, hbuf_size);
 
for (i = 0; i < hbuf_size; i += 8) {
memset(tmp, 0, 8);
if (i < length)
memcpy(tmp, data + i, min_t(unsigned, 8, length - i));
 
if (!intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_SET_HBUF_DATA,
data, 8))
tmp, 8))
return false;
data++;
}
 
return intel_sdvo_set_value(intel_sdvo,
899,6 → 942,28
&tx_rate, 1);
}
 
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
.len = DIP_LEN_AVI,
};
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
 
intel_dip_infoframe_csum(&avi_if);
 
/* sdvo spec says that the ecc is handled by the hw, and it looks like
* we must not send the ecc field, either. */
memcpy(sdvo_data, &avi_if, 3);
sdvo_data[3] = avi_if.checksum;
memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
 
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
SDVO_HBUF_TX_VSYNC,
sdvo_data, sizeof(sdvo_data));
}
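 
intel_dip_infoframe_csum() (defined elsewhere in the driver) is expected to follow the standard CEA-861 infoframe rule: the checksum byte makes the header and payload bytes sum to zero modulo 256. A hypothetical sketch of that rule:
 
#include <stddef.h>
#include <stdint.h>
 
/* Returns the byte that makes (sum of all bytes + checksum) % 256 == 0. */
static uint8_t infoframe_checksum(const uint8_t *bytes, size_t len)
{
	uint8_t sum = 0;
	size_t i;
 
	for (i = 0; i < len; i++)
		sum += bytes[i];
	return 0x100 - sum;	/* equivalently: (uint8_t)-sum */
}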
 
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
{
struct intel_sdvo_tv_format format;
916,7 → 981,7
 
static bool
intel_sdvo_set_output_timings_from_mode(struct intel_sdvo *intel_sdvo,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
struct intel_sdvo_dtd output_dtd;
 
931,11 → 996,15
return true;
}
 
/* Asks the sdvo controller for the preferred input mode given the output mode.
* Unfortunately we have to set up the full output mode to do that. */
static bool
intel_sdvo_set_input_timings_for_mode(struct intel_sdvo *intel_sdvo,
struct drm_display_mode *mode,
intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_sdvo_dtd input_dtd;
 
/* Reset the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
return false;
947,17 → 1016,17
return false;
 
if (!intel_sdvo_get_preferred_input_timing(intel_sdvo,
&intel_sdvo->input_dtd))
&input_dtd))
return false;
 
intel_sdvo_get_mode_from_dtd(adjusted_mode, &intel_sdvo->input_dtd);
intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
intel_sdvo->dtd_sdvo_flags = input_dtd.part2.sdvo_flags;
 
drm_mode_set_crtcinfo(adjusted_mode, 0);
return true;
}
 
static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
972,7 → 1041,7
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo, mode))
return false;
 
(void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
mode,
adjusted_mode);
} else if (intel_sdvo->is_lvds) {
980,7 → 1049,7
intel_sdvo->sdvo_lvds_fixed_mode))
return false;
 
(void) intel_sdvo_set_input_timings_for_mode(intel_sdvo,
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
mode,
adjusted_mode);
}
1005,7 → 1074,7
struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd;
struct intel_sdvo_dtd input_dtd, output_dtd;
int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
int rate;
 
1030,21 → 1099,16
intel_sdvo->attached_output))
return;
 
/* We have tried to get input timing in mode_fixup, and filled into
* adjusted_mode.
*/
if (intel_sdvo->is_tv || intel_sdvo->is_lvds) {
input_dtd = intel_sdvo->input_dtd;
} else {
/* Set the output timing to the screen */
if (!intel_sdvo_set_target_output(intel_sdvo,
intel_sdvo->attached_output))
return;
/* lvds has a special fixed output timing. */
if (intel_sdvo->is_lvds)
intel_sdvo_get_dtd_from_mode(&output_dtd,
intel_sdvo->sdvo_lvds_fixed_mode);
else
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
DRM_INFO("Setting output timings on %s failed\n",
SDVO_NAME(intel_sdvo));
 
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
(void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd);
}
 
/* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
1061,7 → 1125,15
!intel_sdvo_set_tv_format(intel_sdvo))
return;
 
(void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd);
/* We have tried to get input timing in mode_fixup, and filled into
* adjusted_mode.
*/
intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
if (intel_sdvo->is_tv || intel_sdvo->is_lvds)
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
DRM_INFO("Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
 
switch (pixel_multiplier) {
default:
1116,26 → 1188,66
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
 
static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct intel_sdvo_connector *intel_sdvo_connector =
to_intel_sdvo_connector(&connector->base);
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base);
u16 active_outputs;
 
intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
 
if (active_outputs & intel_sdvo_connector->output_flag)
return true;
else
return false;
}
 
static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
u32 tmp;
 
tmp = I915_READ(intel_sdvo->sdvo_reg);
 
if (!(tmp & SDVO_ENABLE))
return false;
 
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
 
return true;
}
 
static void intel_disable_sdvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
u32 temp;
 
if (mode != DRM_MODE_DPMS_ON) {
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_OFF);
 
if (mode == DRM_MODE_DPMS_OFF) {
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
}
}
} else {
 
static void intel_enable_sdvo(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
bool input1, input2;
int i;
u8 status;
1157,12 → 1269,53
}
 
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_ON);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
 
static void intel_sdvo_dpms(struct drm_connector *connector, int mode)
{
struct drm_crtc *crtc;
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 
/* dvo supports only 2 dpms states. */
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
 
if (mode == connector->dpms)
return;
 
connector->dpms = mode;
 
/* Only need to change hw state when actually enabled */
crtc = intel_sdvo->base.base.crtc;
if (!crtc) {
intel_sdvo->base.connectors_active = false;
return;
}
 
if (mode != DRM_MODE_DPMS_ON) {
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
 
intel_sdvo->base.connectors_active = false;
 
intel_crtc_update_dpms(crtc);
} else {
intel_sdvo->base.connectors_active = true;
 
intel_crtc_update_dpms(crtc);
 
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo, mode);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
}
 
intel_modeset_check_state(connector->dev);
}
 
static int intel_sdvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
1225,12 → 1378,21
return true;
}
 
static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
{
u8 response[2];
struct drm_device *dev = intel_sdvo->base.base.dev;
uint16_t hotplug;
 
return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
&response, 2) && response[0];
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
if (IS_I945G(dev) || IS_I945GM(dev))
return 0;
 
if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
&hotplug, sizeof(hotplug)))
return 0;
 
return hotplug;
}
 
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
1237,7 → 1399,8
{
struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
 
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2);
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
&intel_sdvo->hotplug_active, 2);
}
 
static bool
1261,10 → 1424,11
struct drm_i915_private *dev_priv = connector->dev->dev_private;
 
return drm_get_edid(connector,
&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
intel_gmbus_get_adapter(dev_priv,
dev_priv->crt_ddc_pin));
}
 
enum drm_connector_status
static enum drm_connector_status
intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1312,14 → 1476,13
}
} else
status = connector_status_disconnected;
connector->display_info.raw_edid = NULL;
kfree(edid);
}
 
if (status == connector_status_connected) {
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
if (intel_sdvo_connector->force_audio)
intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
if (intel_sdvo_connector->force_audio != HDMI_AUDIO_AUTO)
intel_sdvo->has_hdmi_audio = (intel_sdvo_connector->force_audio == HDMI_AUDIO_ON);
}
 
return status;
1350,9 → 1513,8
return connector_status_unknown;
 
/* add 30ms delay when the output type might be TV */
if (intel_sdvo->caps.output_flags &
(SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
mdelay(30);
if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
msleep(30);
 
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown;
1387,7 → 1549,6
else
ret = connector_status_disconnected;
 
connector->display_info.raw_edid = NULL;
kfree(edid);
} else
ret = connector_status_connected;
1433,7 → 1594,6
drm_add_edid_modes(connector, edid);
}
 
connector->display_info.raw_edid = NULL;
kfree(edid);
}
}
1571,9 → 1731,6
intel_sdvo->sdvo_lvds_fixed_mode =
drm_mode_duplicate(connector->dev, newmode);
 
drm_mode_set_crtcinfo(intel_sdvo->sdvo_lvds_fixed_mode,
0);
 
intel_sdvo->is_lvds = true;
break;
}
1651,9 → 1808,22
kfree(connector);
}
 
static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
{
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
struct edid *edid;
bool has_audio = false;
 
if (!intel_sdvo->is_hdmi)
return false;
 
edid = intel_sdvo_get_edid(connector);
if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
 
return has_audio;
}
 
static int
intel_sdvo_set_property(struct drm_connector *connector,
1681,10 → 1851,10
 
intel_sdvo_connector->force_audio = i;
 
if (i == 0)
if (i == HDMI_AUDIO_AUTO)
has_audio = intel_sdvo_detect_hdmi_audio(connector);
else
has_audio = i > 0;
has_audio = (i == HDMI_AUDIO_ON);
 
if (has_audio == intel_sdvo->has_hdmi_audio)
return 0;
1797,8 → 1967,8
done:
if (intel_sdvo->base.base.crtc) {
struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
crtc->y, crtc->fb);
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
 
return 0;
1806,15 → 1976,13
}
 
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
.dpms = intel_sdvo_dpms,
.mode_fixup = intel_sdvo_mode_fixup,
.prepare = intel_encoder_prepare,
.mode_set = intel_sdvo_mode_set,
.commit = intel_encoder_commit,
.disable = intel_encoder_noop,
};
 
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.dpms = intel_sdvo_dpms,
.detect = intel_sdvo_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = intel_sdvo_set_property,
1892,7 → 2060,7
{
struct sdvo_device_mapping *mapping;
 
if (IS_SDVOB(reg))
if (sdvo->is_sdvob)
mapping = &(dev_priv->sdvo_mappings[0]);
else
mapping = &(dev_priv->sdvo_mappings[1]);
1910,7 → 2078,7
struct sdvo_device_mapping *mapping;
u8 pin;
 
if (IS_SDVOB(reg))
if (sdvo->is_sdvob)
mapping = &dev_priv->sdvo_mappings[0];
else
mapping = &dev_priv->sdvo_mappings[1];
1919,12 → 2087,12
if (mapping->initialized)
pin = mapping->i2c_pin;
 
if (pin < GMBUS_NUM_PORTS) {
sdvo->i2c = &dev_priv->gmbus[pin].adapter;
if (intel_gmbus_is_port_valid(pin)) {
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
intel_gmbus_force_bit(sdvo->i2c, true);
} else {
sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
}
}
 
1935,12 → 2103,12
}
 
static u8
intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct sdvo_device_mapping *my_mapping, *other_mapping;
 
if (IS_SDVOB(sdvo_reg)) {
if (sdvo->is_sdvob) {
my_mapping = &dev_priv->sdvo_mappings[0];
other_mapping = &dev_priv->sdvo_mappings[1];
} else {
1965,7 → 2133,7
/* No SDVO device info is found for another DVO port,
* so use mapping assumption we had before BIOS parsing.
*/
if (IS_SDVOB(sdvo_reg))
if (sdvo->is_sdvob)
return 0x70;
else
return 0x72;
1983,9 → 2151,10
drm_connector_helper_add(&connector->base.base,
&intel_sdvo_connector_helper_funcs);
 
connector->base.base.interlace_allowed = 0;
connector->base.base.interlace_allowed = 1;
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
 
intel_connector_attach_encoder(&connector->base, &encoder->base);
drm_sysfs_connector_add(&connector->base.base);
2024,17 → 2193,18
 
intel_connector = &intel_sdvo_connector->base;
connector = &intel_connector->base;
if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) {
if (intel_sdvo_get_hotplug_support(intel_sdvo) &
intel_sdvo_connector->output_flag) {
connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_sdvo->hotplug_active[0] |= 1 << device;
intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
/* Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
*/
intel_encoder->hot_plug = intel_sdvo_enable_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
} else {
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
}
else
connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
2042,8 → 2212,7
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo->is_hdmi = true;
}
intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT));
intel_sdvo->base.cloneable = true;
 
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
2074,7 → 2243,7
 
intel_sdvo->is_tv = true;
intel_sdvo->base.needs_tv_clock = true;
intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
intel_sdvo->base.cloneable = false;
 
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
 
2117,8 → 2286,7
intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
}
 
intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT));
intel_sdvo->base.cloneable = true;
 
intel_sdvo_connector_init(intel_sdvo_connector,
intel_sdvo);
2150,8 → 2318,8
intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
}
 
intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
(1 << INTEL_SDVO_LVDS_CLONE_BIT));
/* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */
intel_sdvo->base.cloneable = false;
 
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
2190,6 → 2358,10
if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_CVBS0))
return false;
 
if (flags & SDVO_OUTPUT_YPRPB0)
if (!intel_sdvo_tv_init(intel_sdvo, SDVO_OUTPUT_YPRPB0))
return false;
 
if (flags & SDVO_OUTPUT_RGB0)
if (!intel_sdvo_analog_init(intel_sdvo, 0))
return false;
2275,10 → 2447,8
intel_sdvo_connector->max_##name = data_value[0]; \
intel_sdvo_connector->cur_##name = response; \
intel_sdvo_connector->name = \
drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!intel_sdvo_connector->name) return false; \
intel_sdvo_connector->name->values[0] = 0; \
intel_sdvo_connector->name->values[1] = data_value[0]; \
drm_connector_attach_property(connector, \
intel_sdvo_connector->name, \
intel_sdvo_connector->cur_##name); \
2312,25 → 2482,19
intel_sdvo_connector->left_margin = data_value[0] - response;
intel_sdvo_connector->right_margin = intel_sdvo_connector->left_margin;
intel_sdvo_connector->left =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"left_margin", 2);
drm_property_create_range(dev, 0, "left_margin", 0, data_value[0]);
if (!intel_sdvo_connector->left)
return false;
 
intel_sdvo_connector->left->values[0] = 0;
intel_sdvo_connector->left->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->left,
intel_sdvo_connector->left_margin);
 
intel_sdvo_connector->right =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"right_margin", 2);
drm_property_create_range(dev, 0, "right_margin", 0, data_value[0]);
if (!intel_sdvo_connector->right)
return false;
 
intel_sdvo_connector->right->values[0] = 0;
intel_sdvo_connector->right->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->right,
intel_sdvo_connector->right_margin);
2354,25 → 2518,21
intel_sdvo_connector->top_margin = data_value[0] - response;
intel_sdvo_connector->bottom_margin = intel_sdvo_connector->top_margin;
intel_sdvo_connector->top =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"top_margin", 2);
drm_property_create_range(dev, 0,
"top_margin", 0, data_value[0]);
if (!intel_sdvo_connector->top)
return false;
 
intel_sdvo_connector->top->values[0] = 0;
intel_sdvo_connector->top->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->top,
intel_sdvo_connector->top_margin);
 
intel_sdvo_connector->bottom =
drm_property_create(dev, DRM_MODE_PROP_RANGE,
"bottom_margin", 2);
drm_property_create_range(dev, 0,
"bottom_margin", 0, data_value[0]);
if (!intel_sdvo_connector->bottom)
return false;
 
intel_sdvo_connector->bottom->values[0] = 0;
intel_sdvo_connector->bottom->values[1] = data_value[0];
drm_connector_attach_property(connector,
intel_sdvo_connector->bottom,
intel_sdvo_connector->bottom_margin);
2401,12 → 2561,10
intel_sdvo_connector->max_dot_crawl = 1;
intel_sdvo_connector->cur_dot_crawl = response & 0x1;
intel_sdvo_connector->dot_crawl =
drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
drm_property_create_range(dev, 0, "dot_crawl", 0, 1);
if (!intel_sdvo_connector->dot_crawl)
return false;
 
intel_sdvo_connector->dot_crawl->values[0] = 0;
intel_sdvo_connector->dot_crawl->values[1] = 1;
drm_connector_attach_property(connector,
intel_sdvo_connector->dot_crawl,
intel_sdvo_connector->cur_dot_crawl);
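Every hunk above makes the same API substitution: the old two-step pattern of drm_property_create(dev, DRM_MODE_PROP_RANGE, name, 2) followed by filling values[0]/values[1] by hand becomes a single drm_property_create_range() call that takes the bounds directly. A minimal sketch of the two equivalent forms (the "saturation" name and max_value bound are illustrative, not taken from this file):

/* Old style: allocate a range property, then set min/max manually. */
struct drm_property *prop;

prop = drm_property_create(dev, DRM_MODE_PROP_RANGE, "saturation", 2);
if (!prop)
    return false;
prop->values[0] = 0;         /* minimum */
prop->values[1] = max_value; /* maximum */

/* New style: one call creates the property with its bounds in place. */
prop = drm_property_create_range(dev, 0, "saturation", 0, max_value);
if (!prop)
    return false;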
2485,7 → 2643,7
intel_sdvo_init_ddc_proxy(struct intel_sdvo *sdvo,
struct drm_device *dev)
{
// sdvo->ddc.owner = THIS_MODULE;
sdvo->ddc.owner = THIS_MODULE;
sdvo->ddc.class = I2C_CLASS_DDC;
snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
sdvo->ddc.dev.parent = &dev->pdev->dev;
2495,11 → 2653,12
return 1; //i2c_add_adapter(&sdvo->ddc) == 0;
}
 
bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
struct intel_sdvo *intel_sdvo;
u32 hotplug_mask;
int i;
 
intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
2507,7 → 2666,8
return false;
 
intel_sdvo->sdvo_reg = sdvo_reg;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
intel_sdvo->is_sdvob = is_sdvob;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
kfree(intel_sdvo);
2524,38 → 2684,47
u8 byte;
 
if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
DRM_DEBUG_KMS("No SDVO device found on %s\n",
SDVO_NAME(intel_sdvo));
goto err;
}
}
 
if (IS_SDVOB(sdvo_reg))
dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
else
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
hotplug_mask = 0;
if (IS_G4X(dev)) {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X;
} else if (IS_GEN4(dev)) {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965;
} else {
hotplug_mask = intel_sdvo->is_sdvob ?
SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915;
}
 
drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs);
 
intel_encoder->disable = intel_disable_sdvo;
intel_encoder->enable = intel_enable_sdvo;
intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
 
/* By default, SDVO LVDS is false */
if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps))
goto err;
 
/* Set up hotplug command - note paranoia about contents of reply.
* We assume that the hardware is in a sane state, and only touch
* the bits we think we understand.
*/
intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG,
&intel_sdvo->hotplug_active, 2);
intel_sdvo->hotplug_active[0] &= ~0x3;
 
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
IS_SDVOB(sdvo_reg) ? 'B' : 'C');
DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
SDVO_NAME(intel_sdvo));
goto err;
}
 
/* Only enable the hotplug irq if we need it, to work around noisy
* hotplug lines.
*/
if (intel_sdvo->hotplug_active)
dev_priv->hotplug_supported_mask |= hotplug_mask;
 
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg);
 
/* Set the input timing to the screen. Assume always input 0. */
/drivers/video/drm/i915/intel_sdvo_regs.h
61,6 → 61,11
u16 output_flags;
} __attribute__((packed));
 
/* Note: SDVO detailed timing flags match EDID misc flags. */
#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
#define DTD_FLAG_INTERLACE (1 << 7)
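Since these bits line up with the EDID misc flags, filling an SDVO DTD from a DRM display mode reduces to copying the matching sync and interlace bits across; a hedged sketch of that mapping (the mode pointer and the local flag byte are assumptions, not code from this header):

/* Sketch: translate DRM mode flags into the DTD flag byte. */
u8 dtd_flags = 0;

if (mode->flags & DRM_MODE_FLAG_PHSYNC)
    dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
    dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
    dtd_flags |= DTD_FLAG_INTERLACE;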
 
/** This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
struct {
703,6 → 708,8
#define SDVO_CMD_SET_AUDIO_STAT 0x91
#define SDVO_CMD_GET_AUDIO_STAT 0x92
#define SDVO_CMD_SET_HBUF_INDEX 0x93
#define SDVO_HBUF_INDEX_ELD 0
#define SDVO_HBUF_INDEX_AVI_IF 1
#define SDVO_CMD_GET_HBUF_INDEX 0x94
#define SDVO_CMD_GET_HBUF_INFO 0x95
#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
/drivers/video/drm/i915/intel_sprite.c
29,11 → 29,11
* registers; newer ones are much simpler and we can use the new DRM plane
* support.
*/
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_fourcc.h"
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include "intel_drv.h"
#include "i915_drm.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
static void
56,14 → 56,15
sprctl &= ~SPRITE_PIXFORMAT_MASK;
sprctl &= ~SPRITE_RGB_ORDER_RGBX;
sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
sprctl &= ~SPRITE_TILED;
 
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_RGBX888;
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
84,7 → 85,7
break;
default:
DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
sprctl |= DVS_FORMAT_RGBX888;
sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
}
95,7 → 96,6
/* must disable */
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
sprctl |= SPRITE_ENABLE;
sprctl |= SPRITE_DEST_KEY;
 
/* Sizes are 0 based */
src_w--;
111,15 → 111,19
* when scaling is disabled.
*/
if (crtc_w != src_w || crtc_h != src_h) {
if (!dev_priv->sprite_scaling_enabled) {
dev_priv->sprite_scaling_enabled = true;
sandybridge_update_wm(dev);
intel_update_watermarks(dev);
intel_wait_for_vblank(dev, pipe);
}
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
} else {
if (dev_priv->sprite_scaling_enabled) {
dev_priv->sprite_scaling_enabled = false;
/* potentially re-enable LP watermarks */
sandybridge_update_wm(dev);
intel_update_watermarks(dev);
}
}
 
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
134,7 → 138,7
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
POSTING_READ(SPRSURF(pipe));
}
 
150,8 → 154,11
/* Can't leave the scaler enabled... */
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_WRITE(SPRSURF(pipe), 0);
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
 
dev_priv->sprite_scaling_enabled = false;
intel_update_watermarks(dev);
}
 
static int
209,7 → 216,7
}
 
static void
snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
219,22 → 226,23
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe, pixel_size;
u32 dvscntr, dvsscale = 0;
u32 dvscntr, dvsscale;
 
dvscntr = I915_READ(DVSCNTR(pipe));
 
/* Mask out pixel format bits in case we change it */
dvscntr &= ~DVS_PIXFORMAT_MASK;
dvscntr &= ~DVS_RGB_ORDER_RGBX;
dvscntr &= ~DVS_RGB_ORDER_XBGR;
dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
dvscntr &= ~DVS_TILED;
 
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_RGBX888;
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_RGBX;
dvscntr |= DVS_FORMAT_RGBX888;
pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
263,8 → 271,8
if (obj->tiling_mode != I915_TILING_NONE)
dvscntr |= DVS_TILED;
 
/* must disable */
dvscntr |= DVS_TRICKLE_FEED_DISABLE;
if (IS_GEN6(dev))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
 
/* Sizes are 0 based */
275,7 → 283,8
 
intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
 
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = 0;
if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
291,12 → 300,12
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
POSTING_READ(DVSSURF(pipe));
}
 
static void
snb_disable_plane(struct drm_plane *plane)
ilk_disable_plane(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
307,7 → 316,7
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
/* Flush double buffered register updates */
I915_WRITE(DVSSURF(pipe), 0);
I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
}
 
319,6 → 328,12
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
 
if (!intel_crtc->primary_disabled)
return;
 
intel_crtc->primary_disabled = false;
intel_update_fbc(dev);
 
I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
}
 
330,11 → 345,17
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int reg = DSPCNTR(intel_crtc->plane);
 
if (intel_crtc->primary_disabled)
return;
 
I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
 
intel_crtc->primary_disabled = true;
intel_update_fbc(dev);
}
 
static int
snb_update_colorkey(struct drm_plane *plane,
ilk_update_colorkey(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
363,7 → 384,7
}
 
static void
snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
411,6 → 432,9
 
old_obj = intel_plane->obj;
 
src_w = src_w >> 16;
src_h = src_h >> 16;
 
/* Pipe must be running... */
if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
return -EINVAL;
475,18 → 499,14
* Be sure to re-enable the primary before the sprite is no longer
* covering it fully.
*/
if (!disable_primary && intel_plane->primary_disabled) {
if (!disable_primary)
intel_enable_primary(crtc);
intel_plane->primary_disabled = false;
}
 
intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
crtc_w, crtc_h, x, y, src_w, src_h);
 
if (disable_primary) {
if (disable_primary)
intel_disable_primary(crtc);
intel_plane->primary_disabled = true;
}
 
/* Unpin old obj after new one is active to avoid ugliness */
if (old_obj) {
501,7 → 521,7
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
mutex_lock(&dev->struct_mutex);
}
i915_gem_object_unpin(old_obj);
intel_unpin_fb_obj(old_obj);
}
 
out_unlock:
517,11 → 537,8
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret = 0;
 
if (intel_plane->primary_disabled) {
if (plane->crtc)
intel_enable_primary(plane->crtc);
intel_plane->primary_disabled = false;
}
 
intel_plane->disable_plane(plane);
 
if (!intel_plane->obj)
528,7 → 545,7
goto out;
 
mutex_lock(&dev->struct_mutex);
i915_gem_object_unpin(intel_plane->obj);
intel_unpin_fb_obj(intel_plane->obj);
intel_plane->obj = NULL;
mutex_unlock(&dev->struct_mutex);
out:
548,14 → 565,13
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *set = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
 
if (!dev_priv)
return -EINVAL;
// if (!drm_core_check_feature(dev, DRIVER_MODESET))
// return -ENODEV;
 
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
582,14 → 598,13
struct drm_file *file_priv)
{
struct drm_intel_sprite_colorkey *get = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_object *obj;
struct drm_plane *plane;
struct intel_plane *intel_plane;
int ret = 0;
 
if (!dev_priv)
return -EINVAL;
// if (!drm_core_check_feature(dev, DRIVER_MODESET))
// return -ENODEV;
 
mutex_lock(&dev->mode_config.mutex);
 
614,6 → 629,14
.destroy = intel_destroy_plane,
};
 
static uint32_t ilk_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
};
 
static uint32_t snb_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
628,9 → 651,11
{
struct intel_plane *intel_plane;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
int num_plane_formats;
int ret;
 
if (!(IS_GEN6(dev) || IS_GEN7(dev)))
if (INTEL_INFO(dev)->gen < 5)
return -ENODEV;
 
intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
637,28 → 662,48
if (!intel_plane)
return -ENOMEM;
 
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
intel_plane->update_colorkey = ilk_update_colorkey;
intel_plane->get_colorkey = ilk_get_colorkey;
 
if (IS_GEN6(dev)) {
intel_plane->max_downscale = 16;
intel_plane->update_plane = snb_update_plane;
intel_plane->disable_plane = snb_disable_plane;
intel_plane->update_colorkey = snb_update_colorkey;
intel_plane->get_colorkey = snb_get_colorkey;
} else if (IS_GEN7(dev)) {
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
} else {
plane_formats = ilk_plane_formats;
num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
}
break;
 
case 7:
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
intel_plane->update_colorkey = ivb_update_colorkey;
intel_plane->get_colorkey = ivb_get_colorkey;
 
plane_formats = snb_plane_formats;
num_plane_formats = ARRAY_SIZE(snb_plane_formats);
break;
 
default:
kfree(intel_plane);
return -ENODEV;
}
 
intel_plane->pipe = pipe;
possible_crtcs = (1 << pipe);
ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
&intel_plane_funcs, snb_plane_formats,
ARRAY_SIZE(snb_plane_formats), false);
&intel_plane_funcs,
plane_formats, num_plane_formats,
false);
if (ret)
kfree(intel_plane);
 
return ret;
}
 
/drivers/video/drm/i915/kms_display.c
62,6 → 62,9
void (__stdcall *restore_cursor)(int x, int y);
void (*disable_mouse)(void);
u32 mask_seqno;
u32 check_mouse;
u32 check_m_pixel;
 
};
 
 
83,6 → 86,132
void disable_mouse(void)
{};
 
static char *manufacturer_name(unsigned char *x)
{
static char name[4];
 
name[0] = ((x[0] & 0x7C) >> 2) + '@';
name[1] = ((x[0] & 0x03) << 3) + ((x[1] & 0xE0) >> 5) + '@';
name[2] = (x[1] & 0x1F) + '@';
name[3] = 0;
 
return name;
}
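manufacturer_name() unpacks the three 5-bit letters of an EDID PNP vendor ID (stored big-endian in two bytes at offset 8 of the EDID block); for example the bytes 0x10 0xAC decode to "DEL". A minimal usage sketch, assuming edid points at a raw 128-byte EDID block:

unsigned char *vendor = edid + 8;  /* manufacturer ID occupies bytes 8-9 */
dbgprintf("EDID vendor: %s\n", manufacturer_name(vendor));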
 
bool set_mode(struct drm_device *dev, struct drm_connector *connector,
videomode_t *reqmode, bool strict)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_fb_helper *fb_helper = &dev_priv->fbdev->helper;
 
struct drm_mode_config *config = &dev->mode_config;
struct drm_display_mode *mode = NULL, *tmpmode;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_mode_set set;
char *con_name;
char *enc_name;
unsigned hdisplay, vdisplay;
int ret;
 
mutex_lock(&dev->mode_config.mutex);
 
list_for_each_entry(tmpmode, &connector->modes, head)
{
if( (drm_mode_width(tmpmode) == reqmode->width) &&
(drm_mode_height(tmpmode) == reqmode->height) &&
(drm_mode_vrefresh(tmpmode) == reqmode->freq) )
{
mode = tmpmode;
goto do_set;
}
};
 
if( (mode == NULL) && (strict == false) )
{
list_for_each_entry(tmpmode, &connector->modes, head)
{
if( (drm_mode_width(tmpmode) == reqmode->width) &&
(drm_mode_height(tmpmode) == reqmode->height) )
{
mode = tmpmode;
goto do_set;
}
};
};
 
printf("%s failed\n", __FUNCTION__);
 
return -1;
 
do_set:
 
 
encoder = connector->encoder;
crtc = encoder->crtc;
 
con_name = drm_get_connector_name(connector);
enc_name = drm_get_encoder_name(encoder);
 
DRM_DEBUG_KMS("set mode %d %d: crtc %d connector %s encoder %s\n",
reqmode->width, reqmode->height, crtc->base.id,
con_name, enc_name);
 
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
hdisplay = mode->hdisplay;
vdisplay = mode->vdisplay;
 
if (crtc->invert_dimensions)
swap(hdisplay, vdisplay);
 
fb = fb_helper->fb;
 
fb->width = reqmode->width;
fb->height = reqmode->height;
fb->pitches[0] = ALIGN(reqmode->width * 4, 64);
fb->pitches[1] = ALIGN(reqmode->width * 4, 64);
fb->pitches[2] = ALIGN(reqmode->width * 4, 64);
fb->pitches[3] = ALIGN(reqmode->width * 4, 64);
 
fb->bits_per_pixel = 32;
fb->depth = 24;
 
crtc->fb = fb;
crtc->enabled = true;
os_display->crtc = crtc;
 
set.crtc = crtc;
set.x = 0;
set.y = 0;
set.mode = mode;
set.connectors = &connector;
set.num_connectors = 1;
set.fb = fb;
ret = crtc->funcs->set_config(&set);
mutex_unlock(&dev->mode_config.mutex);
 
if ( !ret )
{
os_display->width = fb->width;
os_display->height = fb->height;
os_display->pitch = fb->pitches[0];
os_display->vrefresh = drm_mode_vrefresh(mode);
 
sysSetScreen(fb->width, fb->height, fb->pitches[0]);
 
dbgprintf("new mode %d x %d pitch %d\n",
fb->width, fb->height, fb->pitches[0]);
}
else
DRM_ERROR("failed to set mode %d_%d on crtc %p\n",
fb->width, fb->height, crtc);
 
 
return ret;
}
 
static int count_connector_modes(struct drm_connector* connector)
{
struct drm_display_mode *mode;
95,6 → 224,47
return count;
};
 
static struct drm_connector* get_def_connector(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
 
struct drm_connector *def_connector = NULL;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
{
struct drm_encoder *encoder;
struct drm_crtc *crtc;
 
if( connector->status != connector_status_connected)
continue;
 
connector_funcs = connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
if( encoder == NULL)
continue;
 
connector->encoder = encoder;
 
crtc = encoder->crtc;
 
dbgprintf("CONNECTOR %x ID: %d status %d encoder %x\n crtc %x",
connector, connector->base.id,
connector->status, connector->encoder,
crtc);
 
// if (crtc == NULL)
// continue;
 
def_connector = connector;
 
break;
};
 
return def_connector;
};
 
 
int init_display_kms(struct drm_device *dev)
{
struct drm_connector *connector;
106,7 → 276,7
cursor_t *cursor;
u32_t ifl;
 
ENTER();
// ENTER();
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
{
122,13 → 292,13
continue;
}
connector->encoder = encoder;
crtc = encoder->crtc;
 
dbgprintf("CONNECTOR %x ID: %d status %d encoder %x\n crtc %x\n",
dbgprintf("CONNECTOR %x ID:%d status:%d ENCODER %x CRTC %x ID:%d\n",
connector, connector->base.id,
connector->status, connector->encoder,
encoder->crtc);
crtc, crtc->base.id );
 
crtc = encoder->crtc;
break;
};
 
165,7 → 335,6
DRM_DEBUG_KMS("[Select CRTC:%d]\n", crtc->base.id);
 
os_display = GetDisplay();
 
os_display->ddev = dev;
os_display->connector = connector;
os_display->crtc = crtc;
201,6 → 370,8
#define BLT_WRITE_ALPHA (1<<21)
#define BLT_WRITE_RGB (1<<20)
 
#if 0
 
#if 1
{
 
209,9 → 380,9
struct intel_ring_buffer *ring;
 
obj = i915_gem_alloc_object(dev, 4096);
i915_gem_object_pin(obj, 4096, true);
i915_gem_object_pin(obj, 4096, true, true);
 
cmd_buffer = MapIoMem((addr_t)obj->pages[0], 4096, PG_SW|PG_NOCACHE);
cmd_buffer = MapIoMem((addr_t)obj->pages.page[0], 4096, PG_SW|PG_NOCACHE);
cmd_offset = obj->gtt_offset;
};
#endif
227,122 → 398,21
};
 
sna_init();
#endif
 
LEAVE();
// LEAVE();
 
return 0;
};
 
 
bool set_mode(struct drm_device *dev, struct drm_connector *connector,
videomode_t *reqmode, bool strict)
{
struct drm_display_mode *mode = NULL, *tmpmode;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_fb_helper *fb_helper = &dev_priv->fbdev->helper;
 
bool ret = false;
 
ENTER();
 
dbgprintf("width %d height %d vrefresh %d\n",
reqmode->width, reqmode->height, reqmode->freq);
 
list_for_each_entry(tmpmode, &connector->modes, head)
{
if( (drm_mode_width(tmpmode) == reqmode->width) &&
(drm_mode_height(tmpmode) == reqmode->height) &&
(drm_mode_vrefresh(tmpmode) == reqmode->freq) )
{
mode = tmpmode;
goto do_set;
}
};
 
if( (mode == NULL) && (strict == false) )
{
list_for_each_entry(tmpmode, &connector->modes, head)
{
if( (drm_mode_width(tmpmode) == reqmode->width) &&
(drm_mode_height(tmpmode) == reqmode->height) )
{
mode = tmpmode;
goto do_set;
}
};
};
 
do_set:
 
if( mode != NULL )
{
struct drm_framebuffer *fb;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
 
char *con_name;
char *enc_name;
 
encoder = connector->encoder;
crtc = encoder->crtc;
 
con_name = drm_get_connector_name(connector);
enc_name = drm_get_encoder_name(encoder);
 
dbgprintf("set mode %d %d connector %s encoder %s\n",
reqmode->width, reqmode->height, con_name, enc_name);
 
fb = fb_helper->fb;
 
fb->width = reqmode->width;
fb->height = reqmode->height;
fb->pitches[0] = ALIGN(reqmode->width * 4, 64);
fb->pitches[1] = ALIGN(reqmode->width * 4, 64);
fb->pitches[2] = ALIGN(reqmode->width * 4, 64);
fb->pitches[3] = ALIGN(reqmode->width * 4, 64);
 
fb->bits_per_pixel = 32;
fb->depth = 24;
 
crtc->fb = fb;
crtc->enabled = true;
os_display->crtc = crtc;
 
ret = drm_crtc_helper_set_mode(crtc, mode, 0, 0, fb);
 
// select_cursor_kms(rdisplay->cursor);
// radeon_show_cursor_kms(crtc);
 
if (ret == true)
{
os_display->width = fb->width;
os_display->height = fb->height;
os_display->pitch = fb->pitches[0];
os_display->vrefresh = drm_mode_vrefresh(mode);
 
sysSetScreen(fb->width, fb->height, fb->pitches[0]);
 
dbgprintf("new mode %d x %d pitch %d\n",
fb->width, fb->height, fb->pitches[0]);
}
else
DRM_ERROR("failed to set mode %d_%d on crtc %p\n",
fb->width, fb->height, crtc);
}
 
LEAVE();
return ret;
};
 
 
 
int get_videomodes(videomode_t *mode, int *count)
{
int err = -1;
 
ENTER();
// ENTER();
 
dbgprintf("mode %x count %d\n", mode, *count);
// dbgprintf("mode %x count %d\n", mode, *count);
 
if( *count == 0 )
{
373,7 → 443,7
*count = i;
err = 0;
};
LEAVE();
// LEAVE();
return err;
};
 
381,10 → 451,10
{
int err = -1;
 
ENTER();
// ENTER();
 
dbgprintf("width %d height %d vrefresh %d\n",
mode->width, mode->height, mode->freq);
// dbgprintf("width %d height %d vrefresh %d\n",
// mode->width, mode->height, mode->freq);
 
if( (mode->width != 0) &&
(mode->height != 0) &&
397,7 → 467,7
err = 0;
};
 
LEAVE();
// LEAVE();
return err;
};
 
421,7 → 491,7
int i,j;
int ret;
 
ENTER();
// ENTER();
 
if (dev_priv->info->cursor_needs_physical)
{
436,7 → 506,7
if (unlikely(obj == NULL))
return -ENOMEM;
 
ret = i915_gem_object_pin(obj, CURSOR_WIDTH*CURSOR_HEIGHT*4, true);
ret = i915_gem_object_pin(obj, CURSOR_WIDTH*CURSOR_HEIGHT*4, true, true);
if (ret) {
drm_gem_object_unreference(&obj->base);
return ret;
445,7 → 515,7
/* You don't need to worry about fragmentation issues.
* GTT space is continuous. I guarantee it. */
 
bits = (u32*)MapIoMem(get_bus_addr() + obj->gtt_offset,
bits = (u32*)MapIoMem(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
CURSOR_WIDTH*CURSOR_HEIGHT*4, PG_SW);
 
if (unlikely(bits == NULL))
476,7 → 546,7
cursor->data = bits;
 
cursor->header.destroy = destroy_cursor;
LEAVE();
// LEAVE();
 
return 0;
}
587,6 → 657,7
return old;
};
 
#if 0
 
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6)
 
1060,7 → 1131,7
{
u8* src_offset;
u8* dst_offset;
u32 slot = *((u8*)CURRENT_TASK);
u32 slot;
u32 ifl;
 
ret = gem_object_lock(mask_bitmap->obj);
1070,12 → 1141,15
return ret;
};
 
printf("width %d height %d\n", winrc.right, winrc.bottom);
// printf("width %d height %d\n", winrc.right, winrc.bottom);
 
mask_bitmap->width = winrc.right;
mask_bitmap->height = winrc.bottom;
mask_bitmap->pitch = ALIGN(w,64);
 
slot = *((u8*)CURRENT_TASK);
// slot = 0x01;
 
slot|= (slot<<8)|(slot<<16)|(slot<<24);
 
 
1083,7 → 1157,7
"movd %[slot], %%xmm6 \n"
"punpckldq %%xmm6, %%xmm6 \n"
"punpcklqdq %%xmm6, %%xmm6 \n"
:: [slot] "g" (slot)
:: [slot] "m" (slot)
:"xmm6");
 
src_offset = mask_bitmap->uaddr;
1252,6 → 1326,7
return context_map[slot];
}
 
#endif
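The disabled blitter path above widens the 8-bit task slot into a u32 with three shift-or steps before broadcasting it into %xmm6. For reference, the same byte replication can be written as one multiply, which is equivalent for any byte value (a sketch, not code from this driver):

u32 slot = *((u8*)CURRENT_TASK);

/* 0xAB * 0x01010101 == 0xABABABAB: copy the low byte into all four lanes. */
u32 pattern = slot * 0x01010101u;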
 
 
 
1260,7 → 1335,6
 
 
 
 
void __stdcall run_workqueue(struct workqueue_struct *cwq)
{
unsigned long irqflags;
1373,7 → 1447,29
return NULL;
}
 
#define NSEC_PER_SEC 1000000000L
 
void getrawmonotonic(struct timespec *ts)
{
u32 tmp = GetTimerTicks();
 
ts->tv_sec = tmp/100;
ts->tv_nsec = (tmp - ts->tv_sec*100)*10000000;
}
 
void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
{
while (nsec >= NSEC_PER_SEC) {
nsec -= NSEC_PER_SEC;
++sec;
}
while (nsec < 0) {
nsec += NSEC_PER_SEC;
--sec;
}
ts->tv_sec = sec;
ts->tv_nsec = nsec;
}
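getrawmonotonic() above derives a timespec from the 100 Hz KolibriOS system tick (10 ms per tick), and set_normalized_timespec() folds an arbitrary sec/nsec pair back into the canonical [0, NSEC_PER_SEC) range. A usage sketch computing an elapsed interval with these two helpers:

struct timespec start, now, delta;

getrawmonotonic(&start);
/* ... do some work ... */
getrawmonotonic(&now);

/* Normalize the raw difference; tv_nsec can go negative across a second. */
set_normalized_timespec(&delta,
                        now.tv_sec - start.tv_sec,
                        now.tv_nsec - start.tv_nsec);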
 
 
 
/drivers/video/drm/i915/main.c
1,10 → 1,9
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include <drm/drmP.h>
#include <drm.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
//#include "intel_drv.h"
 
 
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
52,8 → 51,8
 
if(!dbg_open(log))
{
// strcpy(log, "/RD/1/DRIVERS/i915.log");
strcpy(log, "/HD1/2/i915.log");
strcpy(log, "/RD/1/DRIVERS/i915.log");
// strcpy(log, "/BD1/2/i915.log");
 
if(!dbg_open(log))
{
61,7 → 60,7
return 0;
};
}
dbgprintf("i915 blitter preview\n cmdline: %s\n", cmdline);
dbgprintf("i915 preview #08\n cmdline: %s\n", cmdline);
 
cpu_detect();
dbgprintf("\ncache line size %d\n", x86_clflush_size);
73,7 → 72,6
if(err)
{
dbgprintf("Epic Fail :(/n");
 
};
 
err = RegService("DISPLAY", display_handler);
129,8 → 127,8
break;
 
case SRV_ENUM_MODES:
dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
inp, io->inp_size, io->out_size );
// dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
// inp, io->inp_size, io->out_size );
check_output(4);
// check_input(*outp * sizeof(videomode_t));
if( i915_modeset)
138,13 → 136,13
break;
 
case SRV_SET_MODE:
dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
inp, io->inp_size);
// dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
// inp, io->inp_size);
check_input(sizeof(videomode_t));
if( i915_modeset )
retval = set_user_mode((videomode_t*)inp);
break;
 
#if 0
case SRV_GET_CAPS:
retval = get_driver_caps((hwcaps_t*)inp);
break;
168,6 → 166,8
 
retval = 0;
break;
#endif
 
};
 
return retval;
/drivers/video/drm/i915/pci.c
677,20 → 677,7
region->end = res->end;
}
 
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
u32 *val)
{
*val = PciRead32(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_write_config_dword(struct pci_dev *dev, int where,
u32 val)
{
PciWrite32(dev->busnr, dev->devfn, where, val);
return 1;
}
 
int pci_enable_rom(struct pci_dev *pdev)
{
struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
844,3 → 831,50
pci_disable_rom(pdev);
}
 
#if 0
void pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
 
/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
if (pci_is_pcie(dev))
return;
 
pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
if (lat < 16)
lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
else if (lat > pcibios_max_latency)
lat = pcibios_max_latency;
else
return;
dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
#endif
 
 
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;
 
pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
if (enable)
cmd = old_cmd | PCI_COMMAND_MASTER;
else
cmd = old_cmd & ~PCI_COMMAND_MASTER;
if (cmd != old_cmd) {
dbgprintf("%s bus mastering\n",
enable ? "enabling" : "disabling");
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
dev->is_busmaster = enable;
}
 
void pci_set_master(struct pci_dev *dev)
{
__pci_set_master(dev, true);
// pcibios_set_master(dev);
}
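pci_set_master() above is the standard read-modify-write of PCI_COMMAND. In a driver init path it is typically called once after the device has been located; a minimal sketch, assuming pdev comes from the probe path and that pci_read_config_word() is available as in the code above:

u16 cmd;

pci_set_master(pdev);  /* sets PCI_COMMAND_MASTER, marks dev->is_busmaster */

/* Optional sanity check: read the command register back. */
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
if (!(cmd & PCI_COMMAND_MASTER))
    dbgprintf("bus mastering did not stick\n");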
 
 
 
/drivers/video/drm/radeon/cursor.S
File deleted
/drivers/video/drm/radeon/Makefile
3,7 → 3,7
CC = gcc
LD = ld
AS = as
FASM = e:/fasm/fasm.exe
FASM = fasm.exe
 
DEFINES = -D__KERNEL__ -DCONFIG_X86_32
 
/drivers/video/drm/radeon/Makefile.lto
42,23 → 42,24
 
NAME_SRC= \
pci.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_i2c_helper.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_dp_i2c_helper.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_pci.c \
$(DRM_TOPDIR)/drm_stub.c \
$(DRM_TOPDIR)/i2c/i2c-core.c \
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
tracker/bitmap.c \
r700_vs.c \
r600_video.c \
radeon_device.c \
evergreen.c \
evergreen_blit_shaders.c \
evergreen_blit_kms.c \
evergreen_hdmi.c \
cayman_blit_shaders.c \
radeon_clocks.c \
atom.c \
72,6 → 73,8
radeon_connectors.c \
atombios_crtc.c \
atombios_dp.c \
atombios_encoders.c \
atombios_i2c.c \
radeon_encoders.c \
radeon_fence.c \
radeon_gem.c \
84,6 → 87,8
radeon_gart.c \
radeon_ring.c \
radeon_object_kos.c \
radeon_sa.c \
radeon_semaphore.c \
radeon_pm.c \
r100.c \
r200.c \
92,7 → 97,6
rv515.c \
r520.c \
r600.c \
r600_audio.c \
r600_blit_kms.c \
r600_blit_shaders.c \
r600_hdmi.c \
104,6 → 108,8
rdisplay.c \
rdisplay_kms.c \
cmdline.c \
si.c \
si_blit_shaders.c \
fwblob.asm
 
FW_BINS= \
/drivers/video/drm/radeon/atombios_crtc.c
1696,8 → 1696,7
return ATOM_PPLL2;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
if (ASIC_IS_AVIVO(rdev)) {
} else if (ASIC_IS_AVIVO(rdev)) {
/* in DP mode, the DP ref clock can come from either PPLL
* depending on the asic:
* DCE3: PPLL1 or PPLL2
1715,10 → 1714,20
}
/* all other cases */
pll_in_use = radeon_get_pll_use_mask(crtc);
/* the order shouldn't matter here, but we probably
* need this until we have atomic modeset
*/
if (rdev->flags & RADEON_IS_IGP) {
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
} else {
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
}
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
1726,7 → 1735,6
return radeon_crtc->crtc_id;
}
}
}
 
void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
{
/drivers/video/drm/radeon/evergreen.c
1285,7 → 1285,7
WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
 
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled) {
if (save->crtc_enabled[i]) {
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
/drivers/video/drm/radeon/evergreend.h
91,6 → 91,10
#define FB_READ_EN (1 << 0)
#define FB_WRITE_EN (1 << 1)
 
#define CP_STRMOUT_CNTL 0x84FC
 
#define CP_COHER_CNTL 0x85F0
#define CP_COHER_SIZE 0x85F4
#define CP_COHER_BASE 0x85F8
#define CP_STALLED_STAT1 0x8674
#define CP_STALLED_STAT2 0x8678
/drivers/video/drm/radeon/pci.c
676,35 → 676,7
region->end = res->end;
}
 
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
u32 *val)
{
*val = PciRead32(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_write_config_dword(struct pci_dev *dev, int where,
u32 val)
{
PciWrite32(dev->busnr, dev->devfn, where, val);
return 1;
}
 
static inline int pci_read_config_word(struct pci_dev *dev, int where,
u16 *val)
{
*val = PciRead16(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_write_config_word(struct pci_dev *dev, int where,
u16 val)
{
PciWrite16(dev->busnr, dev->devfn, where, val);
return 1;
}
 
 
int pci_enable_rom(struct pci_dev *pdev)
{
struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
/drivers/video/drm/radeon/radeon_benchmark.c
41,7 → 41,7
struct radeon_fence *fence = NULL;
int i, r;
 
start_jiffies = jiffies;
start_jiffies = GetTimerTicks();
for (i = 0; i < n; i++) {
switch (flag) {
case RADEON_BENCHMARK_COPY_DMA:
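The jiffies-based timing in the ported radeon code is replaced with GetTimerTicks(), which counts the same 100 Hz system ticks used by getrawmonotonic() earlier, so elapsed milliseconds become a simple multiply instead of a jiffies_to_msecs() call. A sketch of the benchmark-style measurement, assuming the 10 ms tick period:

unsigned long start_ticks, end_ticks, elapsed_ms;

start_ticks = GetTimerTicks();
/* ... submit the copy and wait for the fence ... */
end_ticks = GetTimerTicks();

/* One tick is 10 ms at the 100 Hz system timer. */
elapsed_ms = (end_ticks - start_ticks) * 10;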
/drivers/video/drm/radeon/radeon_device.c
1452,13 → 1452,6
return err;
};
 
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{};
 
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{};
 
 
#define PCI_CLASS_REVISION 0x08
#define PCI_CLASS_DISPLAY_VGA 0x0300
 
/drivers/video/drm/radeon/radeon_fence.c
351,7 → 351,7
 
/* change last activity so nobody else think there is a lockup */
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
rdev->fence_drv[i].last_activity = jiffies;
rdev->fence_drv[i].last_activity = GetTimerTicks();
}
 
/* mark the ring as not ready any more */
811,7 → 811,7
for (i = 0; i < RADEON_NUM_RINGS; ++i)
rdev->fence_drv[ring].sync_seq[i] = 0;
atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
rdev->fence_drv[ring].last_activity = jiffies;
rdev->fence_drv[ring].last_activity = GetTimerTicks();
rdev->fence_drv[ring].initialized = false;
}
 
/drivers/video/drm/radeon/si.c
2474,6 → 2474,7
/* check config regs */
switch (reg) {
case GRBM_GFX_INDEX:
case CP_STRMOUT_CNTL:
case VGT_VTX_VECT_EJECT_REG:
case VGT_CACHE_INVALIDATION:
case VGT_ESGS_RING_SIZE:
/drivers/video/drm/radeon/sid.h
424,6 → 424,7
# define RDERR_INT_ENABLE (1 << 0)
# define GUI_IDLE_INT_ENABLE (1 << 19)
 
#define CP_STRMOUT_CNTL 0x84FC
#define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508